diff --git a/cmd/list/list.go b/cmd/list/list.go
index f1abf1a06..350192425 100644
--- a/cmd/list/list.go
+++ b/cmd/list/list.go
@@ -4,12 +4,20 @@ package list
 
 import (
+    "encoding/json"
+    "fmt"
+    "io"
+
     "github.com/cilium/hubble/cmd/common/config"
     "github.com/cilium/hubble/cmd/common/template"
     "github.com/spf13/cobra"
     "github.com/spf13/viper"
 )
 
+var listOpts struct {
+    output string
+}
+
 // New creates a new list command.
 func New(vp *viper.Viper) *cobra.Command {
     listCmd := &cobra.Command{
@@ -23,6 +31,16 @@ func New(vp *viper.Viper) *cobra.Command {
 
     listCmd.AddCommand(
         newNodeCommand(vp),
+        newNamespacesCommand(vp),
     )
     return listCmd
 }
+
+func jsonOutput(buf io.Writer, v interface{}) error {
+    bs, err := json.MarshalIndent(v, "", "  ")
+    if err != nil {
+        return err
+    }
+    _, err = fmt.Fprintln(buf, string(bs))
+    return err
+}
diff --git a/cmd/list/namespace_test.go b/cmd/list/namespace_test.go
new file mode 100644
index 000000000..d1b162d2c
--- /dev/null
+++ b/cmd/list/namespace_test.go
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Hubble
+
+package list
+
+import (
+    "bytes"
+    "testing"
+
+    observerpb "github.com/cilium/cilium/api/v1/observer"
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+)
+
+func TestNamespaceOutputs(t *testing.T) {
+    var testCases = []struct {
+        name                    string
+        namespaces              []*observerpb.Namespace
+        expectedJSONOutput      string
+        expectedTableOutput     string
+        expectedWideTableOutput string
+    }{
+        {
+            name: "multiple namespaces no cluster",
+            namespaces: []*observerpb.Namespace{
+                {Namespace: "foo"},
+                {Namespace: "bar"},
+                {Namespace: "baz"},
+                {Namespace: "faz"},
+            },
+            expectedJSONOutput: `[
+  {
+    "namespace": "foo"
+  },
+  {
+    "namespace": "bar"
+  },
+  {
+    "namespace": "baz"
+  },
+  {
+    "namespace": "faz"
+  }
+]
+`,
+            expectedTableOutput: `NAMESPACE
+foo
+bar
+baz
+faz
+`,
+            expectedWideTableOutput: `NAMESPACE   CLUSTER
+foo         N/A
+bar         N/A
+baz         N/A
+faz         N/A
+`,
+        },
+        {
+            name: "multiple namespaces with cluster",
+            namespaces: []*observerpb.Namespace{
+                {Namespace: "foo", Cluster: "cluster-1"},
+                {Namespace: "bar", Cluster: "cluster-1"},
+                {Namespace: "baz", Cluster: "cluster-2"},
+                {Namespace: "faz", Cluster: "cluster-2"},
+            },
+            expectedJSONOutput: `[
+  {
+    "cluster": "cluster-1",
+    "namespace": "foo"
+  },
+  {
+    "cluster": "cluster-1",
+    "namespace": "bar"
+  },
+  {
+    "cluster": "cluster-2",
+    "namespace": "baz"
+  },
+  {
+    "cluster": "cluster-2",
+    "namespace": "faz"
+  }
+]
+`,
+            expectedTableOutput: `NAMESPACE
+foo
+bar
+baz
+faz
+`,
+            expectedWideTableOutput: `NAMESPACE   CLUSTER
+foo         cluster-1
+bar         cluster-1
+baz         cluster-2
+faz         cluster-2
+`,
+        },
+    }
+
+    for _, tc := range testCases {
+        t.Run(tc.name, func(t *testing.T) {
+            defer func() {
+                listOpts.output = ""
+            }()
+            buf := bytes.Buffer{}
+
+            // json
+            require.NoError(t, jsonOutput(&buf, tc.namespaces), tc.name)
+            assert.Equal(t, tc.expectedJSONOutput, buf.String(), "json %s", tc.name)
+
+            // regular table
+            buf.Reset()
+            require.NoError(t, namespaceTableOutput(&buf, tc.namespaces), tc.name)
+            assert.Equal(t, tc.expectedTableOutput, buf.String(), "regular table %s", tc.name)
+
+            // wide table
+            listOpts.output = "wide"
+            buf.Reset()
+            require.NoError(t, namespaceTableOutput(&buf, tc.namespaces), tc.name)
+            assert.Equal(t, tc.expectedWideTableOutput, buf.String(), "wide table %s", tc.name)
+        })
+    }
+}
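Note: the expected JSON strings in the test above follow directly from the new shared jsonOutput helper in list.go: json.MarshalIndent with a two-space indent plus a trailing newline from fmt.Fprintln, applied to the protobuf-generated observerpb.Namespace messages, whose `json:"...,omitempty"` struct tags drop the cluster field when it is empty. A minimal standalone sketch of that behaviour (it uses a local struct with equivalent tags purely so it runs without the Cilium module; the real helper marshals the generated types directly):

package main

import (
    "encoding/json"
    "fmt"
    "io"
    "os"
)

// namespace mirrors the json tags of the generated observerpb.Namespace type
// (local stand-in for illustration only).
type namespace struct {
    Cluster   string `json:"cluster,omitempty"`
    Namespace string `json:"namespace,omitempty"`
}

// jsonOutput mirrors the helper added to cmd/list/list.go: indent with two
// spaces and terminate the document with a newline.
func jsonOutput(buf io.Writer, v interface{}) error {
    bs, err := json.MarshalIndent(v, "", "  ")
    if err != nil {
        return err
    }
    _, err = fmt.Fprintln(buf, string(bs))
    return err
}

func main() {
    // With Cluster left empty, omitempty drops the field, which is why the
    // "no cluster" test case expects objects containing only "namespace".
    _ = jsonOutput(os.Stdout, []namespace{
        {Namespace: "foo"},
        {Namespace: "bar", Cluster: "cluster-1"},
    })
}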
diff --git a/cmd/list/namespaces.go b/cmd/list/namespaces.go
new file mode 100644
index 000000000..cdbb718b7
--- /dev/null
+++ b/cmd/list/namespaces.go
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Hubble
+
+package list
+
+import (
+    "context"
+    "fmt"
+    "io"
+    "text/tabwriter"
+
+    observerpb "github.com/cilium/cilium/api/v1/observer"
+    "github.com/cilium/hubble/cmd/common/config"
+    "github.com/cilium/hubble/cmd/common/conn"
+    "github.com/cilium/hubble/cmd/common/template"
+    "github.com/spf13/cobra"
+    "github.com/spf13/pflag"
+    "github.com/spf13/viper"
+    "google.golang.org/grpc"
+)
+
+func newNamespacesCommand(vp *viper.Viper) *cobra.Command {
+    namespacesCmd := &cobra.Command{
+        Use:   "namespaces",
+        Short: "List namespaces with recent flows",
+        RunE: func(cmd *cobra.Command, _ []string) error {
+            ctx, cancel := context.WithCancel(context.Background())
+            defer cancel()
+            hubbleConn, err := conn.New(ctx, vp.GetString(config.KeyServer), vp.GetDuration(config.KeyTimeout))
+            if err != nil {
+                return err
+            }
+            defer hubbleConn.Close()
+            return runListNamespaces(ctx, cmd, hubbleConn)
+        },
+    }
+
+    // formatting flags
+    formattingFlags := pflag.NewFlagSet("Formatting", pflag.ContinueOnError)
+    formattingFlags.StringVarP(
+        &listOpts.output, "output", "o", "table",
+        `Specify the output format, one of:
+ json:  JSON encoding
+ table: Tab-aligned columns
+ wide:  Tab-aligned columns with additional information`)
+    namespacesCmd.Flags().AddFlagSet(formattingFlags)
+
+    // advanced completion for flags
+    namespacesCmd.RegisterFlagCompletionFunc("output", func(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) {
+        return []string{
+            "json",
+            "table",
+            "wide",
+        }, cobra.ShellCompDirectiveDefault
+    })
+
+    template.RegisterFlagSets(namespacesCmd, formattingFlags, config.ServerFlags)
+    return namespacesCmd
+}
+
+func runListNamespaces(ctx context.Context, cmd *cobra.Command, conn *grpc.ClientConn) error {
+    req := &observerpb.GetNamespacesRequest{}
+    res, err := observerpb.NewObserverClient(conn).GetNamespaces(ctx, req)
+    if err != nil {
+        return err
+    }
+
+    namespaces := res.GetNamespaces()
+    switch listOpts.output {
+    case "json":
+        return jsonOutput(cmd.OutOrStdout(), namespaces)
+    case "table", "wide":
+        return namespaceTableOutput(cmd.OutOrStdout(), namespaces)
+    default:
+        return fmt.Errorf("unknown output format: %s", listOpts.output)
+    }
+}
+
+func namespaceTableOutput(buf io.Writer, namespaces []*observerpb.Namespace) error {
+    tw := tabwriter.NewWriter(buf, 2, 0, 3, ' ', 0)
+    // header
+    fmt.Fprint(tw, "NAMESPACE")
+    if listOpts.output == "wide" {
+        fmt.Fprint(tw, "\tCLUSTER")
+    }
+    fmt.Fprintln(tw)
+
+    // contents
+    for _, ns := range namespaces {
+        fmt.Fprint(tw, ns.Namespace)
+        if listOpts.output == "wide" {
+            cluster := ns.Cluster
+            if cluster == "" {
+                cluster = notAvailable
+            }
+            fmt.Fprint(tw, "\t", cluster)
+        }
+        fmt.Fprintln(tw)
+    }
+    return tw.Flush()
+}
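For context, the new subcommand is invoked as `hubble list namespaces` (with `-o json` or `-o wide` selecting the formats handled above) and reduces to a single unary Observer RPC. A minimal standalone sketch of that call, assuming a plaintext Hubble Relay reachable at localhost:4245 (an assumption for illustration; the real command resolves the server address, TLS settings, and timeout through the shared config and conn packages rather than dialing directly):

package main

import (
    "context"
    "fmt"
    "time"

    observerpb "github.com/cilium/cilium/api/v1/observer"
    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"
)

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()

    // Assumed endpoint for the sketch; hubble normally takes this from --server.
    cc, err := grpc.DialContext(ctx, "localhost:4245", grpc.WithTransportCredentials(insecure.NewCredentials()))
    if err != nil {
        panic(err)
    }
    defer cc.Close()

    // Same call as runListNamespaces: one unary GetNamespaces request.
    res, err := observerpb.NewObserverClient(cc).GetNamespaces(ctx, &observerpb.GetNamespacesRequest{})
    if err != nil {
        panic(err)
    }
    for _, ns := range res.GetNamespaces() {
        fmt.Println(ns.GetCluster(), ns.GetNamespace())
    }
}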
diff --git a/cmd/list/node.go b/cmd/list/node.go
index 6e0cf10b3..67808400d 100644
--- a/cmd/list/node.go
+++ b/cmd/list/node.go
@@ -5,7 +5,6 @@ package list
 
 import (
     "context"
-    "encoding/json"
     "fmt"
     "io"
     "sort"
@@ -26,10 +25,6 @@ import (
 
 const notAvailable = "N/A"
 
-var listOpts struct {
-    output string
-}
-
 func newNodeCommand(vp *viper.Viper) *cobra.Command {
     listCmd := &cobra.Command{
         Use:   "nodes",
@@ -85,13 +80,13 @@ func runListNodes(ctx context.Context, cmd *cobra.Command, conn *grpc.ClientConn
     case "json":
         return jsonOutput(cmd.OutOrStdout(), nodes)
     case "table", "wide":
-        return tableOutput(cmd.OutOrStdout(), nodes)
+        return nodeTableOutput(cmd.OutOrStdout(), nodes)
     default:
         return fmt.Errorf("unknown output format: %s", listOpts.output)
     }
 }
 
-func tableOutput(buf io.Writer, nodes []*observerpb.Node) error {
+func nodeTableOutput(buf io.Writer, nodes []*observerpb.Node) error {
     tw := tabwriter.NewWriter(buf, 2, 0, 3, ' ', 0)
     fmt.Fprint(tw, "NAME\tSTATUS\tAGE\tFLOWS/S\tCURRENT/MAX-FLOWS")
     if listOpts.output == "wide" {
@@ -130,15 +125,6 @@ func tableOutput(buf io.Writer, nodes []*observerpb.Node) error {
     return tw.Flush()
 }
 
-func jsonOutput(buf io.Writer, nodes []*observerpb.Node) error {
-    bs, err := json.MarshalIndent(nodes, "", "  ")
-    if err != nil {
-        return err
-    }
-    _, err = fmt.Fprintln(buf, string(bs))
-    return err
-}
-
 func nodeStateToString(state relaypb.NodeState) string {
     switch state {
     case relaypb.NodeState_NODE_CONNECTED:
diff --git a/cmd/list/node_test.go b/cmd/list/node_test.go
index 999cb0928..13f206c54 100644
--- a/cmd/list/node_test.go
+++ b/cmd/list/node_test.go
@@ -13,7 +13,7 @@ import (
     "github.com/stretchr/testify/require"
 )
 
-func TestOutputs(t *testing.T) {
+func TestNodeOutputs(t *testing.T) {
     var testCases = []struct {
         name                    string
         nodes                   []*observerpb.Node
@@ -186,13 +186,13 @@ foo Connected 16m40s 0.00 500/1000 ( 50.00%) 1 1.2.3.4 E
 
             // regular table
             buf.Reset()
-            require.NoError(t, tableOutput(&buf, tc.nodes), tc.name)
+            require.NoError(t, nodeTableOutput(&buf, tc.nodes), tc.name)
             assert.Equal(t, tc.expectedTableOutput, buf.String(), "regular table %s", tc.name)
 
             // wide table
             listOpts.output = "wide"
             buf.Reset()
-            require.NoError(t, tableOutput(&buf, tc.nodes), tc.name)
+            require.NoError(t, nodeTableOutput(&buf, tc.nodes), tc.name)
             assert.Equal(t, tc.expectedWideTableOutput, buf.String(), "wide table %s", tc.name)
         })
     }
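Both nodeTableOutput and the new namespaceTableOutput build their columns with text/tabwriter using the same parameters (minwidth 2, tabwidth 0, padding 3, space-padded), which is what produces the three-space gutter in the expected table strings of these tests. A small self-contained sketch of that formatting:

package main

import (
    "fmt"
    "os"
    "text/tabwriter"
)

func main() {
    // Same tabwriter configuration as nodeTableOutput and namespaceTableOutput:
    // minwidth 2, tabwidth 0, padding 3, pad with spaces, no flags.
    tw := tabwriter.NewWriter(os.Stdout, 2, 0, 3, ' ', 0)
    fmt.Fprintln(tw, "NAMESPACE\tCLUSTER")
    fmt.Fprintln(tw, "foo\tcluster-1")
    fmt.Fprintln(tw, "bar\tcluster-2")
    // Flush aligns the columns: each column is padded to its widest cell plus
    // three spaces, matching the expectedWideTableOutput strings above.
    _ = tw.Flush()
}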
diff --git a/cmd/observe/io_reader_observer.go b/cmd/observe/io_reader_observer.go
index 535cc370a..d10d6e066 100644
--- a/cmd/observe/io_reader_observer.go
+++ b/cmd/observe/io_reader_observer.go
@@ -55,6 +55,11 @@ func (o *IOReaderObserver) GetNodes(_ context.Context, _ *observerpb.GetNodesReq
     return nil, status.Errorf(codes.Unimplemented, "GetNodes not implemented")
 }
 
+// GetNamespaces is not implemented, and will throw an error if used.
+func (o *IOReaderObserver) GetNamespaces(_ context.Context, _ *observerpb.GetNamespacesRequest, _ ...grpc.CallOption) (*observerpb.GetNamespacesResponse, error) {
+    return nil, status.Errorf(codes.Unimplemented, "GetNamespaces not implemented")
+}
+
 // ServerStatus is not implemented, and will throw an error if used.
 func (o *IOReaderObserver) ServerStatus(_ context.Context, _ *observerpb.ServerStatusRequest, _ ...grpc.CallOption) (*observerpb.ServerStatusResponse, error) {
     return nil, status.Errorf(codes.Unimplemented, "ServerStatus not implemented")
diff --git a/go.mod b/go.mod
index 9eb986919..806a3edb2 100644
--- a/go.mod
+++ b/go.mod
@@ -3,7 +3,7 @@ module github.com/cilium/hubble
 go 1.20
 
 require (
-    github.com/cilium/cilium v1.14.0-snapshot.1
+    github.com/cilium/cilium v1.14.0-snapshot.3.0.20230614154348-87ac446b902c
     github.com/fatih/color v1.15.0
     github.com/google/go-cmp v0.5.9
     github.com/sirupsen/logrus v1.9.3
@@ -19,23 +19,19 @@ require (
-    github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect
-    github.com/beorn7/perks v1.0.1 // indirect
+    github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
     github.com/blang/semver/v4 v4.0.0 // indirect
-    github.com/cespare/xxhash/v2 v2.2.0 // indirect
     github.com/davecgh/go-spew v1.1.1 // indirect
     github.com/fsnotify/fsnotify v1.6.0 // indirect
-    github.com/go-logr/logr v1.2.3 // indirect
-    github.com/go-logr/stdr v1.2.2 // indirect
+    github.com/go-logr/logr v1.2.4 // indirect
     github.com/go-ole/go-ole v1.2.6 // indirect
     github.com/go-openapi/analysis v0.21.4 // indirect
     github.com/go-openapi/errors v0.20.3 // indirect
     github.com/go-openapi/jsonpointer v0.19.5 // indirect
     github.com/go-openapi/jsonreference v0.20.0 // indirect
     github.com/go-openapi/loads v0.21.2 // indirect
-    github.com/go-openapi/runtime v0.25.0 // indirect
-    github.com/go-openapi/spec v0.20.8 // indirect
-    github.com/go-openapi/strfmt v0.21.5 // indirect
+    github.com/go-openapi/spec v0.20.9 // indirect
+    github.com/go-openapi/strfmt v0.21.7 // indirect
     github.com/go-openapi/swag v0.22.3 // indirect
     github.com/go-openapi/validate v0.22.1 // indirect
     github.com/gogo/protobuf v1.3.2 // indirect
@@ -50,45 +46,37 @@ require (
     github.com/mailru/easyjson v0.7.7 // indirect
     github.com/mattn/go-colorable v0.1.13 // indirect
     github.com/mattn/go-isatty v0.0.17 // indirect
-    github.com/matttproud/golang_protobuf_extensions v1.0.2 // indirect
     github.com/mitchellh/mapstructure v1.5.0 // indirect
     github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
     github.com/modern-go/reflect2 v1.0.2 // indirect
     github.com/oklog/ulid v1.3.1 // indirect
-    github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b // indirect
     github.com/pelletier/go-toml/v2 v2.0.8 // indirect
     github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect
     github.com/pmezard/go-difflib v1.0.0 // indirect
     github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
-    github.com/prometheus/client_golang v1.14.0 // indirect
-    github.com/prometheus/client_model v0.3.0 // indirect
-    github.com/prometheus/common v0.37.0 // indirect
-    github.com/prometheus/procfs v0.9.0 // indirect
     github.com/sasha-s/go-deadlock v0.3.1 // indirect
-    github.com/shirou/gopsutil/v3 v3.23.2 // indirect
+    github.com/shirou/gopsutil/v3 v3.23.5 // indirect
     github.com/spf13/afero v1.9.5 // indirect
     github.com/spf13/jwalterweatherman v1.1.0 // indirect
     github.com/subosito/gotenv v1.4.2 // indirect
     github.com/tklauser/go-sysconf v0.3.11 // indirect
     github.com/tklauser/numcpus v0.6.0 // indirect
-    github.com/vishvananda/netlink v1.2.1-beta.2.0.20220608195807-1a118fe229fc // indirect
+    github.com/vishvananda/netlink v1.2.1-beta.2.0.20230420174744-55c8b9515a01 // indirect
     github.com/vishvananda/netns v0.0.4 // indirect
-
github.com/yusufpapurcu/wmi v1.2.2 // indirect - go.mongodb.org/mongo-driver v1.10.0 // indirect - go.opentelemetry.io/otel v1.14.0 // indirect - go.opentelemetry.io/otel/trace v1.14.0 // indirect - golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect + github.com/yusufpapurcu/wmi v1.2.3 // indirect + go.mongodb.org/mongo-driver v1.11.3 // indirect + golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 // indirect golang.org/x/net v0.10.0 // indirect - golang.org/x/sync v0.1.0 // indirect + golang.org/x/sync v0.2.0 // indirect golang.org/x/text v0.9.0 // indirect golang.org/x/time v0.3.0 // indirect - google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - k8s.io/apimachinery v0.26.0 // indirect - k8s.io/client-go v0.26.0 // indirect - k8s.io/klog/v2 v2.80.1 // indirect + k8s.io/apimachinery v0.26.3 // indirect + k8s.io/client-go v0.26.3 // indirect + k8s.io/klog/v2 v2.100.1 // indirect k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 // indirect sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect diff --git a/go.sum b/go.sum index f0f8d2bc0..6ab8fb5c9 100644 --- a/go.sum +++ b/go.sum @@ -40,30 +40,20 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ= -github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod 
h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cilium/cilium v1.14.0-snapshot.1 h1:m0UiMPWV7dWj/wnLpV9gwMdpHNVjHIHYdVmTySZ/Mdw= -github.com/cilium/cilium v1.14.0-snapshot.1/go.mod h1:3y/te/G4DtMO/bYz6DRUuYLOvy8d0A/XbsVytqqUfOs= +github.com/cilium/checkmate v1.0.3 h1:CQC5eOmlAZeEjPrVZY3ZwEBH64lHlx9mXYdUehEwI5w= +github.com/cilium/cilium v1.14.0-snapshot.3.0.20230614154348-87ac446b902c h1:fB7RqP9F0ZSBufmA0GLzcWGmRv/070xqDpYT8C5rdPM= +github.com/cilium/cilium v1.14.0-snapshot.3.0.20230614154348-87ac446b902c/go.mod h1:3zQ3hSxQPQDX/AfJ9rsnujcrhxutzTjdo6Gj536lUP0= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= @@ -87,20 +77,10 @@ github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbS github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.6 
h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= @@ -120,17 +100,16 @@ github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXym github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g= github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro= github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw= -github.com/go-openapi/runtime v0.25.0 h1:7yQTCdRbWhX8vnIjdzU8S00tBYf7Sg71EBeorlPHvhc= -github.com/go-openapi/runtime v0.25.0/go.mod h1:Ux6fikcHXyyob6LNWxtE96hWwjBPYF0DXgVFuMTneOs= +github.com/go-openapi/runtime v0.26.0 h1:HYOFtG00FM1UvqrcxbEJg/SwvDRvYLQKGhw2zaQjTcc= github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= -github.com/go-openapi/spec v0.20.8 h1:ubHmXNY3FCIOinT8RNrrPfGc9t7I1qhPtdOGoG2AxRU= -github.com/go-openapi/spec v0.20.8/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= +github.com/go-openapi/spec v0.20.9 h1:xnlYNQAwKd2VQRRfwTEI0DcK+2cbuvI/0c7jx3gA8/8= +github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= -github.com/go-openapi/strfmt v0.21.5 h1:Z/algjpXIZpbvdN+6KbVTkpO75RuedMrqpn1GN529h4= -github.com/go-openapi/strfmt v0.21.5/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= +github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k= +github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= @@ -163,7 +142,6 @@ github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWe github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -193,7 +171,6 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= 
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -247,16 +224,10 @@ github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLf github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= @@ -264,9 +235,7 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -289,9 +258,7 @@ github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovk github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2 h1:hAHbPm5IJGijwng3PWk09JkG9WeqChjprR5s9bBZ+OM= -github.com/matttproud/golang_protobuf_extensions v1.0.2/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= 
+github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= @@ -299,18 +266,13 @@ github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b h1:FfH+VrHHk6Lxt9HdVS0PXzSXFyS2NbZKXv33FYPol0A= -github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b/go.mod h1:AC62GU6hc0BrNm+9RK9VSiwa/EUe1bkIeFORAMcHvJU= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ= github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4= @@ -324,32 +286,11 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= -github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= 
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= -github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= -github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= -github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= +github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= +github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= +github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -357,13 +298,13 @@ github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZV github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0= github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= -github.com/shirou/gopsutil/v3 v3.23.2 h1:PAWSuiAszn7IhPMBtXsbSCafej7PqUOvY6YywlQUExU= -github.com/shirou/gopsutil/v3 v3.23.2/go.mod h1:gv0aQw33GLo3pG8SiWKiQrbDzbRY1K80RyZJ7V4Th1M= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/shirou/gopsutil/v3 v3.23.5 h1:5SgDCeQ0KW0S4N0znjeM/eFHXXOKyv2dVNgRq/c9P6Y= +github.com/shirou/gopsutil/v3 v3.23.5/go.mod h1:Ng3Maa27Q2KARVJ0SPZF5NdrQSC3XHKP8IIWrHgMeLY= +github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= +github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= github.com/sirupsen/logrus 
v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= @@ -393,7 +334,6 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= @@ -405,8 +345,8 @@ github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+Kd github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI= github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms= github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4= -github.com/vishvananda/netlink v1.2.1-beta.2.0.20220608195807-1a118fe229fc h1:2wzJ1cBcM23GetRJs2y6ETXrFMvp6HefTbFWtqviHZQ= -github.com/vishvananda/netlink v1.2.1-beta.2.0.20220608195807-1a118fe229fc/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netlink v1.2.1-beta.2.0.20230420174744-55c8b9515a01 h1:F9xjJm4IH8VjcqG4ujciOF+GIM4mjPkHhWLLzOghPtM= +github.com/vishvananda/netlink v1.2.1-beta.2.0.20230420174744-55c8b9515a01/go.mod h1:cAAsePK2e15YDAMJNyOpGYEWNe4sIghTY7gpz4cX/Ik= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= @@ -420,23 +360,24 @@ github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= -github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= +github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= -go.mongodb.org/mongo-driver v1.10.0 h1:UtV6N5k14upNp4LTduX0QCufG124fSu25Wz9tu94GLg= go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= 
+go.mongodb.org/mongo-driver v1.11.3 h1:Ql6K6qYHEzB6xvu4+AU0BoRoqf9vFPcc4o7MUIdPW8Y= +go.mongodb.org/mongo-driver v1.11.3/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opentelemetry.io/otel v1.14.0 h1:/79Huy8wbf5DnIPhemGB+zEPVwnN6fuQybr/SRXa6hM= -go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU= -go.opentelemetry.io/otel/sdk v1.11.1 h1:F7KmQgoHljhUuJyA+9BiU+EkJfyX5nVVF4wyzWZpKxs= -go.opentelemetry.io/otel/trace v1.14.0 h1:wp2Mmvj41tDsyAJXiWDWpfNsOiIyd38fy85pyKcFq/M= -go.opentelemetry.io/otel/trace v1.14.0/go.mod h1:8avnQLK+CG77yNLUae4ea2JDQ6iT+gozhnZjy/rw9G8= +go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s= +go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo= +go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs= +go.uber.org/dig v1.17.0 h1:5Chju+tUvcC+N7N6EV08BJz41UZuO3BmHcN4A287ZLI= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= @@ -458,8 +399,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug= -golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc= +golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -485,7 +426,6 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net 
v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -493,7 +433,6 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -517,10 +456,7 @@ golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -532,8 +468,6 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -546,11 +480,10 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= +golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -567,7 +500,6 @@ golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -581,9 +513,6 @@ golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -592,24 +521,21 @@ golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220804214406-8e32c043e418/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -745,8 +671,8 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc h1:XSJ8Vk1SWuNr8S18z1NZSziL0CPIXLCCMDOEFtHBOFc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -779,10 +705,8 @@ 
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= @@ -790,12 +714,8 @@ gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= @@ -811,12 +731,12 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/apimachinery v0.26.0 h1:1feANjElT7MvPqp0JT6F3Ss6TWDwmcjLypwoPpEf7zg= -k8s.io/apimachinery v0.26.0/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74= -k8s.io/client-go v0.26.0 h1:lT1D3OfO+wIi9UFolCrifbjUUgu7CpLca0AD8ghRLI8= -k8s.io/client-go v0.26.0/go.mod h1:I2Sh57A79EQsDmn7F7ASpmru1cceh3ocVT9KlX2jEZg= -k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= -k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/apimachinery v0.26.3 h1:dQx6PNETJ7nODU3XPtrwkfuubs6w7sX0M8n61zHIV/k= +k8s.io/apimachinery v0.26.3/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I= +k8s.io/client-go v0.26.3 h1:k1UY+KXfkxV2ScEL3gilKcF7761xkYsSD6BC9szIu8s= +k8s.io/client-go v0.26.3/go.mod h1:ZPNu9lm8/dbRIPAgteN30RSXea6vrCpFvq+MateTUuQ= +k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= +k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 h1:KTgPnR10d5zhztWptI952TNtt/4u5h3IzDXkdIMuo2Y= k8s.io/utils 
v0.0.0-20221128185143-99ec85e7a448/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git a/vendor/github.com/asaskevich/govalidator/validator.go b/vendor/github.com/asaskevich/govalidator/validator.go index 46ecfc84a..c9c4fac06 100644 --- a/vendor/github.com/asaskevich/govalidator/validator.go +++ b/vendor/github.com/asaskevich/govalidator/validator.go @@ -454,27 +454,26 @@ func IsCreditCard(str string) bool { if !rxCreditCard.MatchString(sanitized) { return false } + + number, _ := ToInt(sanitized) + number, lastDigit := number / 10, number % 10 + var sum int64 - var digit string - var tmpNum int64 - var shouldDouble bool - for i := len(sanitized) - 1; i >= 0; i-- { - digit = sanitized[i:(i + 1)] - tmpNum, _ = ToInt(digit) - if shouldDouble { - tmpNum *= 2 - if tmpNum >= 10 { - sum += (tmpNum % 10) + 1 - } else { - sum += tmpNum + for i:=0; number > 0; i++ { + digit := number % 10 + + if i % 2 == 0 { + digit *= 2 + if digit > 9 { + digit -= 9 } - } else { - sum += tmpNum } - shouldDouble = !shouldDouble + + sum += digit + number = number / 10 } - - return sum%10 == 0 + + return (sum + lastDigit) % 10 == 0 } // IsISBN10 checks if the string is an ISBN version 10. diff --git a/vendor/github.com/beorn7/perks/LICENSE b/vendor/github.com/beorn7/perks/LICENSE deleted file mode 100644 index 339177be6..000000000 --- a/vendor/github.com/beorn7/perks/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (C) 2013 Blake Mizerany - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
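The vendored govalidator bump above also rewrites the Luhn checksum inside IsCreditCard: instead of walking the string one character at a time with a shouldDouble flag, it converts the sanitized number once with ToInt, splits off the check digit, and doubles every other remaining digit, folding two-digit products back into a single digit. A small self-contained sketch of the same checksum logic (luhnValid is a hypothetical name used only for illustration; the card numbers are standard Luhn test values):

package main

import "fmt"

// luhnValid mirrors the updated vendored checksum: strip the last digit, then
// walk the remaining digits right-to-left, doubling every other one (starting
// with the first) and subtracting 9 from any two-digit result.
func luhnValid(number int64) bool {
    number, lastDigit := number/10, number%10

    var sum int64
    for i := 0; number > 0; i++ {
        digit := number % 10
        if i%2 == 0 {
            digit *= 2
            if digit > 9 {
                digit -= 9
            }
        }
        sum += digit
        number /= 10
    }
    return (sum+lastDigit)%10 == 0
}

func main() {
    // 4111111111111111 is a well-known Luhn-valid test PAN; changing the last
    // digit breaks the checksum.
    fmt.Println(luhnValid(4111111111111111)) // true
    fmt.Println(luhnValid(4111111111111112)) // false
}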
diff --git a/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/github.com/beorn7/perks/quantile/exampledata.txt deleted file mode 100644 index 1602287d7..000000000 --- a/vendor/github.com/beorn7/perks/quantile/exampledata.txt +++ /dev/null @@ -1,2388 +0,0 @@ -8 -5 -26 -12 -5 -235 -13 -6 -28 -30 -3 -3 -3 -3 -5 -2 -33 -7 -2 -4 -7 -12 -14 -5 -8 -3 -10 -4 -5 -3 -6 -6 -209 -20 -3 -10 -14 -3 -4 -6 -8 -5 -11 -7 -3 -2 -3 -3 -212 -5 -222 -4 -10 -10 -5 -6 -3 -8 -3 -10 -254 -220 -2 -3 -5 -24 -5 -4 -222 -7 -3 -3 -223 -8 -15 -12 -14 -14 -3 -2 -2 -3 -13 -3 -11 -4 -4 -6 -5 -7 -13 -5 -3 -5 -2 -5 -3 -5 -2 -7 -15 -17 -14 -3 -6 -6 -3 -17 -5 -4 -7 -6 -4 -4 -8 -6 -8 -3 -9 -3 -6 -3 -4 -5 -3 -3 -660 -4 -6 -10 -3 -6 -3 -2 -5 -13 -2 -4 -4 -10 -4 -8 -4 -3 -7 -9 -9 -3 -10 -37 -3 -13 -4 -12 -3 -6 -10 -8 -5 -21 -2 -3 -8 -3 -2 -3 -3 -4 -12 -2 -4 -8 -8 -4 -3 -2 -20 -1 -6 -32 -2 -11 -6 -18 -3 -8 -11 -3 -212 -3 -4 -2 -6 -7 -12 -11 -3 -2 -16 -10 -6 -4 -6 -3 -2 -7 -3 -2 -2 -2 -2 -5 -6 -4 -3 -10 -3 -4 -6 -5 -3 -4 -4 -5 -6 -4 -3 -4 -4 -5 -7 -5 -5 -3 -2 -7 -2 -4 -12 -4 -5 -6 -2 -4 -4 -8 -4 -15 -13 -7 -16 -5 -3 -23 -5 -5 -7 -3 -2 -9 -8 -7 -5 -8 -11 -4 -10 -76 -4 -47 -4 -3 -2 -7 -4 -2 -3 -37 -10 -4 -2 -20 -5 -4 -4 -10 -10 -4 -3 -7 -23 -240 -7 -13 -5 -5 -3 -3 -2 -5 -4 -2 -8 -7 -19 -2 -23 -8 -7 -2 -5 -3 -8 -3 -8 -13 -5 -5 -5 -2 -3 -23 -4 -9 -8 -4 -3 -3 -5 -220 -2 -3 -4 -6 -14 -3 -53 -6 -2 -5 -18 -6 -3 -219 -6 -5 -2 -5 -3 -6 -5 -15 -4 -3 -17 -3 -2 -4 -7 -2 -3 -3 -4 -4 -3 -2 -664 -6 -3 -23 -5 -5 -16 -5 -8 -2 -4 -2 -24 -12 -3 -2 -3 -5 -8 -3 -5 -4 -3 -14 -3 -5 -8 -2 -3 -7 -9 -4 -2 -3 -6 -8 -4 -3 -4 -6 -5 -3 -3 -6 -3 -19 -4 -4 -6 -3 -6 -3 -5 -22 -5 -4 -4 -3 -8 -11 -4 -9 -7 -6 -13 -4 -4 -4 -6 -17 -9 -3 -3 -3 -4 -3 -221 -5 -11 -3 -4 -2 -12 -6 -3 -5 -7 -5 -7 -4 -9 -7 -14 -37 -19 -217 -16 -3 -5 -2 -2 -7 -19 -7 -6 -7 -4 -24 -5 -11 -4 -7 -7 -9 -13 -3 -4 -3 -6 -28 -4 -4 -5 -5 -2 -5 -6 -4 -4 -6 -10 -5 -4 -3 -2 -3 -3 -6 -5 -5 -4 -3 -2 -3 -7 -4 -6 -18 -16 -8 -16 -4 -5 -8 -6 -9 -13 -1545 -6 -215 -6 -5 -6 -3 -45 -31 -5 -2 -2 -4 -3 -3 -2 -5 -4 -3 -5 -7 -7 -4 -5 -8 -5 -4 -749 -2 -31 -9 -11 -2 -11 -5 -4 -4 -7 -9 -11 -4 -5 -4 -7 -3 -4 -6 -2 -15 -3 -4 -3 -4 -3 -5 -2 -13 -5 -5 -3 -3 -23 -4 -4 -5 -7 -4 -13 -2 -4 -3 -4 -2 -6 -2 -7 -3 -5 -5 -3 -29 -5 -4 -4 -3 -10 -2 -3 -79 -16 -6 -6 -7 -7 -3 -5 -5 -7 -4 -3 -7 -9 -5 -6 -5 -9 -6 -3 -6 -4 -17 -2 -10 -9 -3 -6 -2 -3 -21 -22 -5 -11 -4 -2 -17 -2 -224 -2 -14 -3 -4 -4 -2 -4 -4 -4 -4 -5 -3 -4 -4 -10 -2 -6 -3 -3 -5 -7 -2 -7 -5 -6 -3 -218 -2 -2 -5 -2 -6 -3 -5 -222 -14 -6 -33 -3 -2 -5 -3 -3 -3 -9 -5 -3 -3 -2 -7 -4 -3 -4 -3 -5 -6 -5 -26 -4 -13 -9 -7 -3 -221 -3 -3 -4 -4 -4 -4 -2 -18 -5 -3 -7 -9 -6 -8 -3 -10 -3 -11 -9 -5 -4 -17 -5 -5 -6 -6 -3 -2 -4 -12 -17 -6 -7 -218 -4 -2 -4 -10 -3 -5 -15 -3 -9 -4 -3 -3 -6 -29 -3 -3 -4 -5 -5 -3 -8 -5 -6 -6 -7 -5 -3 -5 -3 -29 -2 -31 -5 -15 -24 -16 -5 -207 -4 -3 -3 -2 -15 -4 -4 -13 -5 -5 -4 -6 -10 -2 -7 -8 -4 -6 -20 -5 -3 -4 -3 -12 -12 -5 -17 -7 -3 -3 -3 -6 -10 -3 -5 -25 -80 -4 -9 -3 -2 -11 -3 -3 -2 -3 -8 -7 -5 -5 -19 -5 -3 -3 -12 -11 -2 -6 -5 -5 -5 -3 -3 -3 -4 -209 -14 -3 -2 -5 -19 -4 -4 -3 -4 -14 -5 -6 -4 -13 -9 -7 -4 -7 -10 -2 -9 -5 -7 -2 -8 -4 -6 -5 -5 -222 -8 -7 -12 -5 -216 -3 -4 -4 -6 -3 -14 -8 -7 -13 -4 -3 -3 -3 -3 -17 -5 -4 -3 -33 -6 -6 -33 -7 -5 -3 -8 -7 -5 -2 -9 -4 -2 -233 -24 -7 -4 -8 -10 -3 -4 -15 -2 -16 -3 -3 -13 -12 -7 -5 -4 -207 -4 -2 -4 -27 -15 -2 -5 -2 -25 -6 -5 -5 -6 -13 -6 -18 -6 -4 -12 -225 -10 -7 -5 -2 -2 -11 -4 -14 -21 -8 -10 -3 -5 -4 -232 -2 -5 -5 -3 -7 -17 -11 -6 -6 -23 -4 -6 -3 -5 -4 -2 -17 -3 -6 -5 -8 -3 -2 -2 -14 -9 -4 -4 -2 -5 -5 -3 -7 -6 -12 -6 
-10 -3 -6 -2 -2 -19 -5 -4 -4 -9 -2 -4 -13 -3 -5 -6 -3 -6 -5 -4 -9 -6 -3 -5 -7 -3 -6 -6 -4 -3 -10 -6 -3 -221 -3 -5 -3 -6 -4 -8 -5 -3 -6 -4 -4 -2 -54 -5 -6 -11 -3 -3 -4 -4 -4 -3 -7 -3 -11 -11 -7 -10 -6 -13 -223 -213 -15 -231 -7 -3 -7 -228 -2 -3 -4 -4 -5 -6 -7 -4 -13 -3 -4 -5 -3 -6 -4 -6 -7 -2 -4 -3 -4 -3 -3 -6 -3 -7 -3 -5 -18 -5 -6 -8 -10 -3 -3 -3 -2 -4 -2 -4 -4 -5 -6 -6 -4 -10 -13 -3 -12 -5 -12 -16 -8 -4 -19 -11 -2 -4 -5 -6 -8 -5 -6 -4 -18 -10 -4 -2 -216 -6 -6 -6 -2 -4 -12 -8 -3 -11 -5 -6 -14 -5 -3 -13 -4 -5 -4 -5 -3 -28 -6 -3 -7 -219 -3 -9 -7 -3 -10 -6 -3 -4 -19 -5 -7 -11 -6 -15 -19 -4 -13 -11 -3 -7 -5 -10 -2 -8 -11 -2 -6 -4 -6 -24 -6 -3 -3 -3 -3 -6 -18 -4 -11 -4 -2 -5 -10 -8 -3 -9 -5 -3 -4 -5 -6 -2 -5 -7 -4 -4 -14 -6 -4 -4 -5 -5 -7 -2 -4 -3 -7 -3 -3 -6 -4 -5 -4 -4 -4 -3 -3 -3 -3 -8 -14 -2 -3 -5 -3 -2 -4 -5 -3 -7 -3 -3 -18 -3 -4 -4 -5 -7 -3 -3 -3 -13 -5 -4 -8 -211 -5 -5 -3 -5 -2 -5 -4 -2 -655 -6 -3 -5 -11 -2 -5 -3 -12 -9 -15 -11 -5 -12 -217 -2 -6 -17 -3 -3 -207 -5 -5 -4 -5 -9 -3 -2 -8 -5 -4 -3 -2 -5 -12 -4 -14 -5 -4 -2 -13 -5 -8 -4 -225 -4 -3 -4 -5 -4 -3 -3 -6 -23 -9 -2 -6 -7 -233 -4 -4 -6 -18 -3 -4 -6 -3 -4 -4 -2 -3 -7 -4 -13 -227 -4 -3 -5 -4 -2 -12 -9 -17 -3 -7 -14 -6 -4 -5 -21 -4 -8 -9 -2 -9 -25 -16 -3 -6 -4 -7 -8 -5 -2 -3 -5 -4 -3 -3 -5 -3 -3 -3 -2 -3 -19 -2 -4 -3 -4 -2 -3 -4 -4 -2 -4 -3 -3 -3 -2 -6 -3 -17 -5 -6 -4 -3 -13 -5 -3 -3 -3 -4 -9 -4 -2 -14 -12 -4 -5 -24 -4 -3 -37 -12 -11 -21 -3 -4 -3 -13 -4 -2 -3 -15 -4 -11 -4 -4 -3 -8 -3 -4 -4 -12 -8 -5 -3 -3 -4 -2 -220 -3 -5 -223 -3 -3 -3 -10 -3 -15 -4 -241 -9 -7 -3 -6 -6 -23 -4 -13 -7 -3 -4 -7 -4 -9 -3 -3 -4 -10 -5 -5 -1 -5 -24 -2 -4 -5 -5 -6 -14 -3 -8 -2 -3 -5 -13 -13 -3 -5 -2 -3 -15 -3 -4 -2 -10 -4 -4 -4 -5 -5 -3 -5 -3 -4 -7 -4 -27 -3 -6 -4 -15 -3 -5 -6 -6 -5 -4 -8 -3 -9 -2 -6 -3 -4 -3 -7 -4 -18 -3 -11 -3 -3 -8 -9 -7 -24 -3 -219 -7 -10 -4 -5 -9 -12 -2 -5 -4 -4 -4 -3 -3 -19 -5 -8 -16 -8 -6 -22 -3 -23 -3 -242 -9 -4 -3 -3 -5 -7 -3 -3 -5 -8 -3 -7 -5 -14 -8 -10 -3 -4 -3 -7 -4 -6 -7 -4 -10 -4 -3 -11 -3 -7 -10 -3 -13 -6 -8 -12 -10 -5 -7 -9 -3 -4 -7 -7 -10 -8 -30 -9 -19 -4 -3 -19 -15 -4 -13 -3 -215 -223 -4 -7 -4 -8 -17 -16 -3 -7 -6 -5 -5 -4 -12 -3 -7 -4 -4 -13 -4 -5 -2 -5 -6 -5 -6 -6 -7 -10 -18 -23 -9 -3 -3 -6 -5 -2 -4 -2 -7 -3 -3 -2 -5 -5 -14 -10 -224 -6 -3 -4 -3 -7 -5 -9 -3 -6 -4 -2 -5 -11 -4 -3 -3 -2 -8 -4 -7 -4 -10 -7 -3 -3 -18 -18 -17 -3 -3 -3 -4 -5 -3 -3 -4 -12 -7 -3 -11 -13 -5 -4 -7 -13 -5 -4 -11 -3 -12 -3 -6 -4 -4 -21 -4 -6 -9 -5 -3 -10 -8 -4 -6 -4 -4 -6 -5 -4 -8 -6 -4 -6 -4 -4 -5 -9 -6 -3 -4 -2 -9 -3 -18 -2 -4 -3 -13 -3 -6 -6 -8 -7 -9 -3 -2 -16 -3 -4 -6 -3 -2 -33 -22 -14 -4 -9 -12 -4 -5 -6 -3 -23 -9 -4 -3 -5 -5 -3 -4 -5 -3 -5 -3 -10 -4 -5 -5 -8 -4 -4 -6 -8 -5 -4 -3 -4 -6 -3 -3 -3 -5 -9 -12 -6 -5 -9 -3 -5 -3 -2 -2 -2 -18 -3 -2 -21 -2 -5 -4 -6 -4 -5 -10 -3 -9 -3 -2 -10 -7 -3 -6 -6 -4 -4 -8 -12 -7 -3 -7 -3 -3 -9 -3 -4 -5 -4 -4 -5 -5 -10 -15 -4 -4 -14 -6 -227 -3 -14 -5 -216 -22 -5 -4 -2 -2 -6 -3 -4 -2 -9 -9 -4 -3 -28 -13 -11 -4 -5 -3 -3 -2 -3 -3 -5 -3 -4 -3 -5 -23 -26 -3 -4 -5 -6 -4 -6 -3 -5 -5 -3 -4 -3 -2 -2 -2 -7 -14 -3 -6 -7 -17 -2 -2 -15 -14 -16 -4 -6 -7 -13 -6 -4 -5 -6 -16 -3 -3 -28 -3 -6 -15 -3 -9 -2 -4 -6 -3 -3 -22 -4 -12 -6 -7 -2 -5 -4 -10 -3 -16 -6 -9 -2 -5 -12 -7 -5 -5 -5 -5 -2 -11 -9 -17 -4 -3 -11 -7 -3 -5 -15 -4 -3 -4 -211 -8 -7 -5 -4 -7 -6 -7 -6 -3 -6 -5 -6 -5 -3 -4 -4 -26 -4 -6 -10 -4 -4 -3 -2 -3 -3 -4 -5 -9 -3 -9 -4 -4 -5 -5 -8 -2 -4 -2 -3 -8 -4 -11 -19 -5 -8 -6 -3 -5 -6 -12 -3 -2 -4 -16 -12 -3 -4 -4 -8 -6 -5 -6 -6 -219 -8 -222 -6 -16 -3 -13 -19 -5 -4 -3 -11 -6 -10 -4 -7 -7 -12 -5 -3 -3 -5 -6 -10 -3 -8 -2 -5 -4 -7 -2 -4 -4 -2 
-12 -9 -6 -4 -2 -40 -2 -4 -10 -4 -223 -4 -2 -20 -6 -7 -24 -5 -4 -5 -2 -20 -16 -6 -5 -13 -2 -3 -3 -19 -3 -2 -4 -5 -6 -7 -11 -12 -5 -6 -7 -7 -3 -5 -3 -5 -3 -14 -3 -4 -4 -2 -11 -1 -7 -3 -9 -6 -11 -12 -5 -8 -6 -221 -4 -2 -12 -4 -3 -15 -4 -5 -226 -7 -218 -7 -5 -4 -5 -18 -4 -5 -9 -4 -4 -2 -9 -18 -18 -9 -5 -6 -6 -3 -3 -7 -3 -5 -4 -4 -4 -12 -3 -6 -31 -5 -4 -7 -3 -6 -5 -6 -5 -11 -2 -2 -11 -11 -6 -7 -5 -8 -7 -10 -5 -23 -7 -4 -3 -5 -34 -2 -5 -23 -7 -3 -6 -8 -4 -4 -4 -2 -5 -3 -8 -5 -4 -8 -25 -2 -3 -17 -8 -3 -4 -8 -7 -3 -15 -6 -5 -7 -21 -9 -5 -6 -6 -5 -3 -2 -3 -10 -3 -6 -3 -14 -7 -4 -4 -8 -7 -8 -2 -6 -12 -4 -213 -6 -5 -21 -8 -2 -5 -23 -3 -11 -2 -3 -6 -25 -2 -3 -6 -7 -6 -6 -4 -4 -6 -3 -17 -9 -7 -6 -4 -3 -10 -7 -2 -3 -3 -3 -11 -8 -3 -7 -6 -4 -14 -36 -3 -4 -3 -3 -22 -13 -21 -4 -2 -7 -4 -4 -17 -15 -3 -7 -11 -2 -4 -7 -6 -209 -6 -3 -2 -2 -24 -4 -9 -4 -3 -3 -3 -29 -2 -2 -4 -3 -3 -5 -4 -6 -3 -3 -2 -4 diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go deleted file mode 100644 index d7d14f8eb..000000000 --- a/vendor/github.com/beorn7/perks/quantile/stream.go +++ /dev/null @@ -1,316 +0,0 @@ -// Package quantile computes approximate quantiles over an unbounded data -// stream within low memory and CPU bounds. -// -// A small amount of accuracy is traded to achieve the above properties. -// -// Multiple streams can be merged before calling Query to generate a single set -// of results. This is meaningful when the streams represent the same type of -// data. See Merge and Samples. -// -// For more detailed information about the algorithm used, see: -// -// Effective Computation of Biased Quantiles over Data Streams -// -// http://www.cs.rutgers.edu/~muthu/bquant.pdf -package quantile - -import ( - "math" - "sort" -) - -// Sample holds an observed value and meta information for compression. JSON -// tags have been added for convenience. -type Sample struct { - Value float64 `json:",string"` - Width float64 `json:",string"` - Delta float64 `json:",string"` -} - -// Samples represents a slice of samples. It implements sort.Interface. -type Samples []Sample - -func (a Samples) Len() int { return len(a) } -func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value } -func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -type invariant func(s *stream, r float64) float64 - -// NewLowBiased returns an initialized Stream for low-biased quantiles -// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but -// error guarantees can still be given even for the lower ranks of the data -// distribution. -// -// The provided epsilon is a relative error, i.e. the true quantile of a value -// returned by a query is guaranteed to be within (1±Epsilon)*Quantile. -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error -// properties. -func NewLowBiased(epsilon float64) *Stream { - ƒ := func(s *stream, r float64) float64 { - return 2 * epsilon * r - } - return newStream(ƒ) -} - -// NewHighBiased returns an initialized Stream for high-biased quantiles -// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but -// error guarantees can still be given even for the higher ranks of the data -// distribution. -// -// The provided epsilon is a relative error, i.e. the true quantile of a value -// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile). -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error -// properties. 
-func NewHighBiased(epsilon float64) *Stream { - ƒ := func(s *stream, r float64) float64 { - return 2 * epsilon * (s.n - r) - } - return newStream(ƒ) -} - -// NewTargeted returns an initialized Stream concerned with a particular set of -// quantile values that are supplied a priori. Knowing these a priori reduces -// space and computation time. The targets map maps the desired quantiles to -// their absolute errors, i.e. the true quantile of a value returned by a query -// is guaranteed to be within (Quantile±Epsilon). -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. -func NewTargeted(targetMap map[float64]float64) *Stream { - // Convert map to slice to avoid slow iterations on a map. - // ƒ is called on the hot path, so converting the map to a slice - // beforehand results in significant CPU savings. - targets := targetMapToSlice(targetMap) - - ƒ := func(s *stream, r float64) float64 { - var m = math.MaxFloat64 - var f float64 - for _, t := range targets { - if t.quantile*s.n <= r { - f = (2 * t.epsilon * r) / t.quantile - } else { - f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile) - } - if f < m { - m = f - } - } - return m - } - return newStream(ƒ) -} - -type target struct { - quantile float64 - epsilon float64 -} - -func targetMapToSlice(targetMap map[float64]float64) []target { - targets := make([]target, 0, len(targetMap)) - - for quantile, epsilon := range targetMap { - t := target{ - quantile: quantile, - epsilon: epsilon, - } - targets = append(targets, t) - } - - return targets -} - -// Stream computes quantiles for a stream of float64s. It is not thread-safe by -// design. Take care when using across multiple goroutines. -type Stream struct { - *stream - b Samples - sorted bool -} - -func newStream(ƒ invariant) *Stream { - x := &stream{ƒ: ƒ} - return &Stream{x, make(Samples, 0, 500), true} -} - -// Insert inserts v into the stream. -func (s *Stream) Insert(v float64) { - s.insert(Sample{Value: v, Width: 1}) -} - -func (s *Stream) insert(sample Sample) { - s.b = append(s.b, sample) - s.sorted = false - if len(s.b) == cap(s.b) { - s.flush() - } -} - -// Query returns the computed qth percentiles value. If s was created with -// NewTargeted, and q is not in the set of quantiles provided a priori, Query -// will return an unspecified result. -func (s *Stream) Query(q float64) float64 { - if !s.flushed() { - // Fast path when there hasn't been enough data for a flush; - // this also yields better accuracy for small sets of data. - l := len(s.b) - if l == 0 { - return 0 - } - i := int(math.Ceil(float64(l) * q)) - if i > 0 { - i -= 1 - } - s.maybeSort() - return s.b[i].Value - } - s.flush() - return s.stream.query(q) -} - -// Merge merges samples into the underlying streams samples. This is handy when -// merging multiple streams from separate threads, database shards, etc. -// -// ATTENTION: This method is broken and does not yield correct results. The -// underlying algorithm is not capable of merging streams correctly. -func (s *Stream) Merge(samples Samples) { - sort.Sort(samples) - s.stream.merge(samples) -} - -// Reset reinitializes and clears the list reusing the samples buffer memory. -func (s *Stream) Reset() { - s.stream.reset() - s.b = s.b[:0] -} - -// Samples returns stream samples held by s. -func (s *Stream) Samples() Samples { - if !s.flushed() { - return s.b - } - s.flush() - return s.stream.samples() -} - -// Count returns the total number of samples observed in the stream -// since initialization. 
-func (s *Stream) Count() int { - return len(s.b) + s.stream.count() -} - -func (s *Stream) flush() { - s.maybeSort() - s.stream.merge(s.b) - s.b = s.b[:0] -} - -func (s *Stream) maybeSort() { - if !s.sorted { - s.sorted = true - sort.Sort(s.b) - } -} - -func (s *Stream) flushed() bool { - return len(s.stream.l) > 0 -} - -type stream struct { - n float64 - l []Sample - ƒ invariant -} - -func (s *stream) reset() { - s.l = s.l[:0] - s.n = 0 -} - -func (s *stream) insert(v float64) { - s.merge(Samples{{v, 1, 0}}) -} - -func (s *stream) merge(samples Samples) { - // TODO(beorn7): This tries to merge not only individual samples, but - // whole summaries. The paper doesn't mention merging summaries at - // all. Unittests show that the merging is inaccurate. Find out how to - // do merges properly. - var r float64 - i := 0 - for _, sample := range samples { - for ; i < len(s.l); i++ { - c := s.l[i] - if c.Value > sample.Value { - // Insert at position i. - s.l = append(s.l, Sample{}) - copy(s.l[i+1:], s.l[i:]) - s.l[i] = Sample{ - sample.Value, - sample.Width, - math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1), - // TODO(beorn7): How to calculate delta correctly? - } - i++ - goto inserted - } - r += c.Width - } - s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) - i++ - inserted: - s.n += sample.Width - r += sample.Width - } - s.compress() -} - -func (s *stream) count() int { - return int(s.n) -} - -func (s *stream) query(q float64) float64 { - t := math.Ceil(q * s.n) - t += math.Ceil(s.ƒ(s, t) / 2) - p := s.l[0] - var r float64 - for _, c := range s.l[1:] { - r += p.Width - if r+c.Width+c.Delta > t { - return p.Value - } - p = c - } - return p.Value -} - -func (s *stream) compress() { - if len(s.l) < 2 { - return - } - x := s.l[len(s.l)-1] - xi := len(s.l) - 1 - r := s.n - 1 - x.Width - - for i := len(s.l) - 2; i >= 0; i-- { - c := s.l[i] - if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { - x.Width += c.Width - s.l[xi] = x - // Remove element at i. - copy(s.l[i:], s.l[i+1:]) - s.l = s.l[:len(s.l)-1] - xi -= 1 - } else { - x = c - xi = i - } - r -= c.Width - } -} - -func (s *stream) samples() Samples { - samples := make(Samples, len(s.l)) - copy(samples, s.l) - return samples -} diff --git a/vendor/github.com/cespare/xxhash/v2/LICENSE.txt b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt deleted file mode 100644 index 24b53065f..000000000 --- a/vendor/github.com/cespare/xxhash/v2/LICENSE.txt +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2016 Caleb Spare - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
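For context on the perks/quantile files removed above: the deleted package implements streaming quantile estimation over unbounded data with bounded memory, and is best known as an indirect dependency of the Prometheus Go client, which is presumably why it sat under vendor/ here. A short usage sketch, based only on the API visible in the deleted stream.go (NewTargeted, Insert, Query, Count), might look like this:

```go
// Hedged illustration only: typical use of the removed beorn7/perks quantile
// package, reconstructed from the API shown in the deleted source above.
package main

import (
	"fmt"
	"math/rand"

	"github.com/beorn7/perks/quantile"
)

func main() {
	// Track the median, 90th and 99th percentiles with the given absolute
	// errors, as documented on NewTargeted in the deleted stream.go.
	s := quantile.NewTargeted(map[float64]float64{
		0.50: 0.005,
		0.90: 0.001,
		0.99: 0.0001,
	})

	// Insert an effectively unbounded stream of observations; the stream
	// compresses samples so memory stays bounded.
	for i := 0; i < 100000; i++ {
		s.Insert(rand.NormFloat64())
	}

	fmt.Printf("n=%d p50=%.3f p90=%.3f p99=%.3f\n",
		s.Count(), s.Query(0.50), s.Query(0.90), s.Query(0.99))
}
```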
diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md deleted file mode 100644 index 8bf0e5b78..000000000 --- a/vendor/github.com/cespare/xxhash/v2/README.md +++ /dev/null @@ -1,72 +0,0 @@ -# xxhash - -[![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2) -[![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml) - -xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a -high-quality hashing algorithm that is much faster than anything in the Go -standard library. - -This package provides a straightforward API: - -``` -func Sum64(b []byte) uint64 -func Sum64String(s string) uint64 -type Digest struct{ ... } - func New() *Digest -``` - -The `Digest` type implements hash.Hash64. Its key methods are: - -``` -func (*Digest) Write([]byte) (int, error) -func (*Digest) WriteString(string) (int, error) -func (*Digest) Sum64() uint64 -``` - -The package is written with optimized pure Go and also contains even faster -assembly implementations for amd64 and arm64. If desired, the `purego` build tag -opts into using the Go code even on those architectures. - -[xxHash]: http://cyan4973.github.io/xxHash/ - -## Compatibility - -This package is in a module and the latest code is in version 2 of the module. -You need a version of Go with at least "minimal module compatibility" to use -github.com/cespare/xxhash/v2: - -* 1.9.7+ for Go 1.9 -* 1.10.3+ for Go 1.10 -* Go 1.11 or later - -I recommend using the latest release of Go. - -## Benchmarks - -Here are some quick benchmarks comparing the pure-Go and assembly -implementations of Sum64. - -| input size | purego | asm | -| ---------- | --------- | --------- | -| 4 B | 1.3 GB/s | 1.2 GB/s | -| 16 B | 2.9 GB/s | 3.5 GB/s | -| 100 B | 6.9 GB/s | 8.1 GB/s | -| 4 KB | 11.7 GB/s | 16.7 GB/s | -| 10 MB | 12.0 GB/s | 17.3 GB/s | - -These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C -CPU using the following commands under Go 1.19.2: - -``` -benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') -benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') -``` - -## Projects using this package - -- [InfluxDB](https://github.com/influxdata/influxdb) -- [Prometheus](https://github.com/prometheus/prometheus) -- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) -- [FreeCache](https://github.com/coocood/freecache) -- [FastCache](https://github.com/VictoriaMetrics/fastcache) diff --git a/vendor/github.com/cespare/xxhash/v2/testall.sh b/vendor/github.com/cespare/xxhash/v2/testall.sh deleted file mode 100644 index 94b9c4439..000000000 --- a/vendor/github.com/cespare/xxhash/v2/testall.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash -set -eu -o pipefail - -# Small convenience script for running the tests with various combinations of -# arch/tags. This assumes we're running on amd64 and have qemu available. - -go test ./... -go test -tags purego ./... -GOARCH=arm64 go test -GOARCH=arm64 go test -tags purego diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go deleted file mode 100644 index a9e0d45c9..000000000 --- a/vendor/github.com/cespare/xxhash/v2/xxhash.go +++ /dev/null @@ -1,228 +0,0 @@ -// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described -// at http://cyan4973.github.io/xxHash/. 
-package xxhash - -import ( - "encoding/binary" - "errors" - "math/bits" -) - -const ( - prime1 uint64 = 11400714785074694791 - prime2 uint64 = 14029467366897019727 - prime3 uint64 = 1609587929392839161 - prime4 uint64 = 9650029242287828579 - prime5 uint64 = 2870177450012600261 -) - -// Store the primes in an array as well. -// -// The consts are used when possible in Go code to avoid MOVs but we need a -// contiguous array of the assembly code. -var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} - -// Digest implements hash.Hash64. -type Digest struct { - v1 uint64 - v2 uint64 - v3 uint64 - v4 uint64 - total uint64 - mem [32]byte - n int // how much of mem is used -} - -// New creates a new Digest that computes the 64-bit xxHash algorithm. -func New() *Digest { - var d Digest - d.Reset() - return &d -} - -// Reset clears the Digest's state so that it can be reused. -func (d *Digest) Reset() { - d.v1 = primes[0] + prime2 - d.v2 = prime2 - d.v3 = 0 - d.v4 = -primes[0] - d.total = 0 - d.n = 0 -} - -// Size always returns 8 bytes. -func (d *Digest) Size() int { return 8 } - -// BlockSize always returns 32 bytes. -func (d *Digest) BlockSize() int { return 32 } - -// Write adds more data to d. It always returns len(b), nil. -func (d *Digest) Write(b []byte) (n int, err error) { - n = len(b) - d.total += uint64(n) - - memleft := d.mem[d.n&(len(d.mem)-1):] - - if d.n+n < 32 { - // This new data doesn't even fill the current block. - copy(memleft, b) - d.n += n - return - } - - if d.n > 0 { - // Finish off the partial block. - c := copy(memleft, b) - d.v1 = round(d.v1, u64(d.mem[0:8])) - d.v2 = round(d.v2, u64(d.mem[8:16])) - d.v3 = round(d.v3, u64(d.mem[16:24])) - d.v4 = round(d.v4, u64(d.mem[24:32])) - b = b[c:] - d.n = 0 - } - - if len(b) >= 32 { - // One or more full blocks left. - nw := writeBlocks(d, b) - b = b[nw:] - } - - // Store any remaining partial block. - copy(d.mem[:], b) - d.n = len(b) - - return -} - -// Sum appends the current hash to b and returns the resulting slice. -func (d *Digest) Sum(b []byte) []byte { - s := d.Sum64() - return append( - b, - byte(s>>56), - byte(s>>48), - byte(s>>40), - byte(s>>32), - byte(s>>24), - byte(s>>16), - byte(s>>8), - byte(s), - ) -} - -// Sum64 returns the current hash. -func (d *Digest) Sum64() uint64 { - var h uint64 - - if d.total >= 32 { - v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = d.v3 + prime5 - } - - h += d.total - - b := d.mem[:d.n&(len(d.mem)-1)] - for ; len(b) >= 8; b = b[8:] { - k1 := round(0, u64(b[:8])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if len(b) >= 4 { - h ^= uint64(u32(b[:4])) * prime1 - h = rol23(h)*prime2 + prime3 - b = b[4:] - } - for ; len(b) > 0; b = b[1:] { - h ^= uint64(b[0]) * prime5 - h = rol11(h) * prime1 - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -const ( - magic = "xxh\x06" - marshaledSize = len(magic) + 8*5 + 32 -) - -// MarshalBinary implements the encoding.BinaryMarshaler interface. -func (d *Digest) MarshalBinary() ([]byte, error) { - b := make([]byte, 0, marshaledSize) - b = append(b, magic...) - b = appendUint64(b, d.v1) - b = appendUint64(b, d.v2) - b = appendUint64(b, d.v3) - b = appendUint64(b, d.v4) - b = appendUint64(b, d.total) - b = append(b, d.mem[:d.n]...) 
- b = b[:len(b)+len(d.mem)-d.n] - return b, nil -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. -func (d *Digest) UnmarshalBinary(b []byte) error { - if len(b) < len(magic) || string(b[:len(magic)]) != magic { - return errors.New("xxhash: invalid hash state identifier") - } - if len(b) != marshaledSize { - return errors.New("xxhash: invalid hash state size") - } - b = b[len(magic):] - b, d.v1 = consumeUint64(b) - b, d.v2 = consumeUint64(b) - b, d.v3 = consumeUint64(b) - b, d.v4 = consumeUint64(b) - b, d.total = consumeUint64(b) - copy(d.mem[:], b) - d.n = int(d.total % uint64(len(d.mem))) - return nil -} - -func appendUint64(b []byte, x uint64) []byte { - var a [8]byte - binary.LittleEndian.PutUint64(a[:], x) - return append(b, a[:]...) -} - -func consumeUint64(b []byte) ([]byte, uint64) { - x := u64(b) - return b[8:], x -} - -func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } -func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } - -func round(acc, input uint64) uint64 { - acc += input * prime2 - acc = rol31(acc) - acc *= prime1 - return acc -} - -func mergeRound(acc, val uint64) uint64 { - val = round(0, val) - acc ^= val - acc = acc*prime1 + prime4 - return acc -} - -func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } -func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } -func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } -func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } -func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } -func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } -func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } -func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s deleted file mode 100644 index 3e8b13257..000000000 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s +++ /dev/null @@ -1,209 +0,0 @@ -//go:build !appengine && gc && !purego -// +build !appengine -// +build gc -// +build !purego - -#include "textflag.h" - -// Registers: -#define h AX -#define d AX -#define p SI // pointer to advance through b -#define n DX -#define end BX // loop end -#define v1 R8 -#define v2 R9 -#define v3 R10 -#define v4 R11 -#define x R12 -#define prime1 R13 -#define prime2 R14 -#define prime4 DI - -#define round(acc, x) \ - IMULQ prime2, x \ - ADDQ x, acc \ - ROLQ $31, acc \ - IMULQ prime1, acc - -// round0 performs the operation x = round(0, x). -#define round0(x) \ - IMULQ prime2, x \ - ROLQ $31, x \ - IMULQ prime1, x - -// mergeRound applies a merge round on the two registers acc and x. -// It assumes that prime1, prime2, and prime4 have been loaded. -#define mergeRound(acc, x) \ - round0(x) \ - XORQ x, acc \ - IMULQ prime1, acc \ - ADDQ prime4, acc - -// blockLoop processes as many 32-byte blocks as possible, -// updating v1, v2, v3, and v4. It assumes that there is at least one block -// to process. -#define blockLoop() \ -loop: \ - MOVQ +0(p), x \ - round(v1, x) \ - MOVQ +8(p), x \ - round(v2, x) \ - MOVQ +16(p), x \ - round(v3, x) \ - MOVQ +24(p), x \ - round(v4, x) \ - ADDQ $32, p \ - CMPQ p, end \ - JLE loop - -// func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 - // Load fixed primes. - MOVQ ·primes+0(SB), prime1 - MOVQ ·primes+8(SB), prime2 - MOVQ ·primes+24(SB), prime4 - - // Load slice. 
- MOVQ b_base+0(FP), p - MOVQ b_len+8(FP), n - LEAQ (p)(n*1), end - - // The first loop limit will be len(b)-32. - SUBQ $32, end - - // Check whether we have at least one block. - CMPQ n, $32 - JLT noBlocks - - // Set up initial state (v1, v2, v3, v4). - MOVQ prime1, v1 - ADDQ prime2, v1 - MOVQ prime2, v2 - XORQ v3, v3 - XORQ v4, v4 - SUBQ prime1, v4 - - blockLoop() - - MOVQ v1, h - ROLQ $1, h - MOVQ v2, x - ROLQ $7, x - ADDQ x, h - MOVQ v3, x - ROLQ $12, x - ADDQ x, h - MOVQ v4, x - ROLQ $18, x - ADDQ x, h - - mergeRound(h, v1) - mergeRound(h, v2) - mergeRound(h, v3) - mergeRound(h, v4) - - JMP afterBlocks - -noBlocks: - MOVQ ·primes+32(SB), h - -afterBlocks: - ADDQ n, h - - ADDQ $24, end - CMPQ p, end - JG try4 - -loop8: - MOVQ (p), x - ADDQ $8, p - round0(x) - XORQ x, h - ROLQ $27, h - IMULQ prime1, h - ADDQ prime4, h - - CMPQ p, end - JLE loop8 - -try4: - ADDQ $4, end - CMPQ p, end - JG try1 - - MOVL (p), x - ADDQ $4, p - IMULQ prime1, x - XORQ x, h - - ROLQ $23, h - IMULQ prime2, h - ADDQ ·primes+16(SB), h - -try1: - ADDQ $4, end - CMPQ p, end - JGE finalize - -loop1: - MOVBQZX (p), x - ADDQ $1, p - IMULQ ·primes+32(SB), x - XORQ x, h - ROLQ $11, h - IMULQ prime1, h - - CMPQ p, end - JL loop1 - -finalize: - MOVQ h, x - SHRQ $33, x - XORQ x, h - IMULQ prime2, h - MOVQ h, x - SHRQ $29, x - XORQ x, h - IMULQ ·primes+16(SB), h - MOVQ h, x - SHRQ $32, x - XORQ x, h - - MOVQ h, ret+24(FP) - RET - -// func writeBlocks(d *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 - // Load fixed primes needed for round. - MOVQ ·primes+0(SB), prime1 - MOVQ ·primes+8(SB), prime2 - - // Load slice. - MOVQ b_base+8(FP), p - MOVQ b_len+16(FP), n - LEAQ (p)(n*1), end - SUBQ $32, end - - // Load vN from d. - MOVQ s+0(FP), d - MOVQ 0(d), v1 - MOVQ 8(d), v2 - MOVQ 16(d), v3 - MOVQ 24(d), v4 - - // We don't need to check the loop condition here; this function is - // always called with at least one block of data to process. - blockLoop() - - // Copy vN back to d. - MOVQ v1, 0(d) - MOVQ v2, 8(d) - MOVQ v3, 16(d) - MOVQ v4, 24(d) - - // The number of bytes written is p minus the old base pointer. - SUBQ b_base+8(FP), p - MOVQ p, ret+32(FP) - - RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s deleted file mode 100644 index 7e3145a22..000000000 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s +++ /dev/null @@ -1,183 +0,0 @@ -//go:build !appengine && gc && !purego -// +build !appengine -// +build gc -// +build !purego - -#include "textflag.h" - -// Registers: -#define digest R1 -#define h R2 // return value -#define p R3 // input pointer -#define n R4 // input length -#define nblocks R5 // n / 32 -#define prime1 R7 -#define prime2 R8 -#define prime3 R9 -#define prime4 R10 -#define prime5 R11 -#define v1 R12 -#define v2 R13 -#define v3 R14 -#define v4 R15 -#define x1 R20 -#define x2 R21 -#define x3 R22 -#define x4 R23 - -#define round(acc, x) \ - MADD prime2, acc, x, acc \ - ROR $64-31, acc \ - MUL prime1, acc - -// round0 performs the operation x = round(0, x). -#define round0(x) \ - MUL prime2, x \ - ROR $64-31, x \ - MUL prime1, x - -#define mergeRound(acc, x) \ - round0(x) \ - EOR x, acc \ - MADD acc, prime4, prime1, acc - -// blockLoop processes as many 32-byte blocks as possible, -// updating v1, v2, v3, and v4. It assumes that n >= 32. 
-#define blockLoop() \ - LSR $5, n, nblocks \ - PCALIGN $16 \ - loop: \ - LDP.P 16(p), (x1, x2) \ - LDP.P 16(p), (x3, x4) \ - round(v1, x1) \ - round(v2, x2) \ - round(v3, x3) \ - round(v4, x4) \ - SUB $1, nblocks \ - CBNZ nblocks, loop - -// func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 - LDP b_base+0(FP), (p, n) - - LDP ·primes+0(SB), (prime1, prime2) - LDP ·primes+16(SB), (prime3, prime4) - MOVD ·primes+32(SB), prime5 - - CMP $32, n - CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 } - BLT afterLoop - - ADD prime1, prime2, v1 - MOVD prime2, v2 - MOVD $0, v3 - NEG prime1, v4 - - blockLoop() - - ROR $64-1, v1, x1 - ROR $64-7, v2, x2 - ADD x1, x2 - ROR $64-12, v3, x3 - ROR $64-18, v4, x4 - ADD x3, x4 - ADD x2, x4, h - - mergeRound(h, v1) - mergeRound(h, v2) - mergeRound(h, v3) - mergeRound(h, v4) - -afterLoop: - ADD n, h - - TBZ $4, n, try8 - LDP.P 16(p), (x1, x2) - - round0(x1) - - // NOTE: here and below, sequencing the EOR after the ROR (using a - // rotated register) is worth a small but measurable speedup for small - // inputs. - ROR $64-27, h - EOR x1 @> 64-27, h, h - MADD h, prime4, prime1, h - - round0(x2) - ROR $64-27, h - EOR x2 @> 64-27, h, h - MADD h, prime4, prime1, h - -try8: - TBZ $3, n, try4 - MOVD.P 8(p), x1 - - round0(x1) - ROR $64-27, h - EOR x1 @> 64-27, h, h - MADD h, prime4, prime1, h - -try4: - TBZ $2, n, try2 - MOVWU.P 4(p), x2 - - MUL prime1, x2 - ROR $64-23, h - EOR x2 @> 64-23, h, h - MADD h, prime3, prime2, h - -try2: - TBZ $1, n, try1 - MOVHU.P 2(p), x3 - AND $255, x3, x1 - LSR $8, x3, x2 - - MUL prime5, x1 - ROR $64-11, h - EOR x1 @> 64-11, h, h - MUL prime1, h - - MUL prime5, x2 - ROR $64-11, h - EOR x2 @> 64-11, h, h - MUL prime1, h - -try1: - TBZ $0, n, finalize - MOVBU (p), x4 - - MUL prime5, x4 - ROR $64-11, h - EOR x4 @> 64-11, h, h - MUL prime1, h - -finalize: - EOR h >> 33, h - MUL prime2, h - EOR h >> 29, h - MUL prime3, h - EOR h >> 32, h - - MOVD h, ret+24(FP) - RET - -// func writeBlocks(d *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 - LDP ·primes+0(SB), (prime1, prime2) - - // Load state. Assume v[1-4] are stored contiguously. - MOVD d+0(FP), digest - LDP 0(digest), (v1, v2) - LDP 16(digest), (v3, v4) - - LDP b_base+8(FP), (p, n) - - blockLoop() - - // Store updated state. - STP (v1, v2), 0(digest) - STP (v3, v4), 16(digest) - - BIC $31, n - MOVD n, ret+32(FP) - RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go deleted file mode 100644 index 9216e0a40..000000000 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go +++ /dev/null @@ -1,15 +0,0 @@ -//go:build (amd64 || arm64) && !appengine && gc && !purego -// +build amd64 arm64 -// +build !appengine -// +build gc -// +build !purego - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. -// -//go:noescape -func Sum64(b []byte) uint64 - -//go:noescape -func writeBlocks(d *Digest, b []byte) int diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go deleted file mode 100644 index 26df13bba..000000000 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go +++ /dev/null @@ -1,76 +0,0 @@ -//go:build (!amd64 && !arm64) || appengine || !gc || purego -// +build !amd64,!arm64 appengine !gc purego - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. 
-func Sum64(b []byte) uint64 { - // A simpler version would be - // d := New() - // d.Write(b) - // return d.Sum64() - // but this is faster, particularly for small inputs. - - n := len(b) - var h uint64 - - if n >= 32 { - v1 := primes[0] + prime2 - v2 := prime2 - v3 := uint64(0) - v4 := -primes[0] - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = prime5 - } - - h += uint64(n) - - for ; len(b) >= 8; b = b[8:] { - k1 := round(0, u64(b[:8])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if len(b) >= 4 { - h ^= uint64(u32(b[:4])) * prime1 - h = rol23(h)*prime2 + prime3 - b = b[4:] - } - for ; len(b) > 0; b = b[1:] { - h ^= uint64(b[0]) * prime5 - h = rol11(h) * prime1 - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -func writeBlocks(d *Digest, b []byte) int { - v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 - n := len(b) - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 - return n - len(b) -} diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go deleted file mode 100644 index e86f1b5fd..000000000 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build appengine -// +build appengine - -// This file contains the safe implementations of otherwise unsafe-using code. - -package xxhash - -// Sum64String computes the 64-bit xxHash digest of s. -func Sum64String(s string) uint64 { - return Sum64([]byte(s)) -} - -// WriteString adds more data to d. It always returns len(s), nil. -func (d *Digest) WriteString(s string) (n int, err error) { - return d.Write([]byte(s)) -} diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go deleted file mode 100644 index 1c1638fd8..000000000 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go +++ /dev/null @@ -1,58 +0,0 @@ -//go:build !appengine -// +build !appengine - -// This file encapsulates usage of unsafe. -// xxhash_safe.go contains the safe implementations. - -package xxhash - -import ( - "unsafe" -) - -// In the future it's possible that compiler optimizations will make these -// XxxString functions unnecessary by realizing that calls such as -// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205. -// If that happens, even if we keep these functions they can be replaced with -// the trivial safe code. - -// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is: -// -// var b []byte -// bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) -// bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data -// bh.Len = len(s) -// bh.Cap = len(s) -// -// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough -// weight to this sequence of expressions that any function that uses it will -// not be inlined. Instead, the functions below use a different unsafe -// conversion designed to minimize the inliner weight and allow both to be -// inlined. 
There is also a test (TestInlining) which verifies that these are -// inlined. -// -// See https://github.com/golang/go/issues/42739 for discussion. - -// Sum64String computes the 64-bit xxHash digest of s. -// It may be faster than Sum64([]byte(s)) by avoiding a copy. -func Sum64String(s string) uint64 { - b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})) - return Sum64(b) -} - -// WriteString adds more data to d. It always returns len(s), nil. -// It may be faster than Write([]byte(s)) by avoiding a copy. -func (d *Digest) WriteString(s string) (n int, err error) { - d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))) - // d.Write always returns len(s), nil. - // Ignoring the return output and returning these fixed values buys a - // savings of 6 in the inliner's cost model. - return len(s), nil -} - -// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout -// of the first two words is the same as the layout of a string. -type sliceHeader struct { - s string - cap int -} diff --git a/vendor/github.com/cilium/cilium/AUTHORS b/vendor/github.com/cilium/cilium/AUTHORS index 1e1cb21f5..2b08784c6 100644 --- a/vendor/github.com/cilium/cilium/AUTHORS +++ b/vendor/github.com/cilium/cilium/AUTHORS @@ -10,7 +10,9 @@ Adam Wolfe Gordon awg@digitalocean.com adamzhoul adamzhoul186@gmail.com Aditi Ghag aditi@cilium.io Aditya Purandare aditya.p1993@hotmail.com +Aditya Sharma aditya.sharma@shopify.com Adrien Trouillaud adrienjt@users.noreply.github.com +Ahmed Bebars 1381372+abebars@users.noreply.github.com Akhil Velagapudi 4@4khil.com Akshat Agarwal humancalico@disroot.org Alan Kutniewski kutniewski@google.com @@ -35,6 +37,7 @@ Andree Klattenhoff mail@andr.ee André Martins andre@cilium.io Andrew Bulford andrew.bulford@form3.tech Andrew Holt andrew.holt@utmost.co +Andrew Sauber 2046750+asauber@users.noreply.github.com Andrew Sy Kim kim.andrewsy@gmail.com Andrey Devyatkin andrey.devyatkin@fivexl.io Andrey Klimentyev andrey.klimentyev@flant.com @@ -57,7 +60,7 @@ Arnaud Meukam ameukam@gmail.com Arthur Chiao arthurchiao@hotmail.com ArthurChiao arthurchiao@hotmail.com Arthur Evstifeev mail@ap4y.me -Arthur Outhenin-Chalandre arthur.outhenin-chalandre@proton.ch +Arthur Outhenin-Chalandre arthur@cri.epita.fr Arvind Soni arvind@covalent.io Ashray Jain ashrayj@palantir.com Ashwin Paranjpe ashwin@covalent.io @@ -65,10 +68,12 @@ Assiya Khuzyakhmetova assiya.khuzyakhmetova@nu.edu.kz Atkins Chang atkinschang@gmail.com Augustas Berneckas a.berneckas@gmail.com Austin Cawley-Edwards austin.cawley@gmail.com +ayesha khaliq ayeshakhaliqrana@gmail.com Ayush Dwivedi ayush.dwivedi@accuknox.com Barun Acharya barun1024@gmail.com Basit Mustafa basit.mustafa@gmail.com Beatriz Martínez beatriz@isovalent.com +Benjamin Leggett benjamin.leggett@solo.io Benjamin Pineau benjamin.pineau@datadoghq.com Bill Mulligan billmulligan516@gmail.com Bingshen Wang bingshen.wbs@alibaba-inc.com @@ -78,6 +83,7 @@ Bob Bouteillier bob.bouteillier@datadoghq.com Bokang Li libokang.dev@gmail.com Bolun Zhao blzhao@google.com Boran Car boran.car@gmail.com +Boris Petrovic carnerito.b@gmail.com Brad Whitfield bradswhitfield@gmail.com Brandon McNama brandonmcnama@outlook.com Brian Topping brian@coglative.com @@ -104,6 +110,7 @@ Christian Hüning christian.huening@finleap.com Christine Chen christine.chen@datadoghq.com Christopher Biscardi chris@christopherbiscardi.com Christopher Schmidt fakod666@gmail.com +Chris Werner Rau cwrau@cwrau.info Cilium Imagebot noreply@cilium.io Cintia Sanchez Garcia cynthiasg@icloud.com Claudia 
J. Kang claudiajkang@gmail.com @@ -113,20 +120,24 @@ cndoit18 cndoit18@outlook.com Connor Jones cj@cjmakes.com Cookie Wang luckymrwang@163.com Craig Box craig.box@gmail.com +cui fliter imcusg@gmail.com Cynthia Thomas cynthia@covalent.io Cyril Corbon corboncyril@gmail.com Cyril Scetbon cscetbon@gmail.com Dale Ragan dale.ragan@sap.com Dalton Hubble dghubble@gmail.com +Daneyon Hansen daneyon.hansen@solo.io Đặng Minh Dũng dungdm93@live.com Daniel Borkmann daniel@iogearbox.net Daniel Dao dqminh89@gmail.com +Daniel Hawton daniel.hawton@solo.io Daniel Qian qsj.daniel@gmail.com Daniel T. Lee danieltimlee@gmail.com Danni Skov Høglund skuffe@pwnz.dk Dan Sexton dan.b.sexton@gmail.com Dan Wendlandt dan@covalent.io Dario Mader 9934402+darox@users.noreply.github.com +darox maderdario@gmail.com Darren Foo darren.foo@shopify.com Darren Mackintosh unixdaddy@gmail.com Darshan Chaudhary deathbullet@gmail.com @@ -149,6 +160,7 @@ Devarshi Sathiya devarshisathiya5@gmail.com Dharma Bellamkonda dharma.bellamkonda@gmail.com Didier Durand durand.didier@gmail.com Diego Casati diego.casati@gmail.com +Dima Pugachev krabradosty@gmail.com Divine Odazie dodazie@gmail.com Divya Mohan divya.mohan0209@gmail.com Divyansh Kamboj divyansh.kamboj@accuknox.com @@ -186,7 +198,8 @@ Fankaixi Li fankaixi.li@bytedance.com Federico Hernandez f@ederi.co Felix Färjsjö felix.farjsjo@gmail.com fengshunli 1171313930@qq.com -Feroz Salam feroz@argh.in +Fernand Galiana fernand.galiana@gmail.com +Feroz Salam feroz@isovalent.com FeynmanZhou pengfeizhou@yunify.com Fish-pro zechun.chen@daocloud.io Florian Koch f0@users.noreply.github.com @@ -221,6 +234,7 @@ Han Zhou hzhou8@ebay.com HaoTian Qi t117503445@gmail.com Hao Zhang hao.zhang.am.i@gmail.com Harsh Modi harshmodi@google.com +harsimran pabla hpabla@isovalent.com Hart Hoover hart.hoover@gmail.com Heiko Rothe me@heikorothe.com Hemanth Malla hemanth.malla@datadoghq.com @@ -238,6 +252,7 @@ Ivan Makarychev i.makarychev@tinkoff.ru Ivar Lazzaro ivarlazzaro@gmail.com Jaff Cheng jaff.cheng.sh@gmail.com Jaime Caamaño Ruiz jcaamano@suse.com +James Brookes jbrookes@confluent.io James McShane james.mcshane@superorbital.io Jan-Erik Rediger janerik@fnordig.de Jarno Rajahalme jarno@isovalent.com @@ -258,6 +273,7 @@ jiuker 2818723467@qq.com Joao Ubaldo me@joaoubaldo.com Joao Victorino joao@accuknox.com Joe Farrell joe2farrell@gmail.com +Joe Stevens joe@ascend.io Joe Stringer joe@cilium.io Joe Talerico rook@isovalent.com Joey Espinosa jlouis.espinosa@gmail.com @@ -271,6 +287,7 @@ Jonathan Davies jpds@protonmail.com Jones Shi shilei@hotstone.com.cn Jorik Jonker jorik.jonker@eu.equinix.com Joseph-Irving joseph.irving500@gmail.com +Joseph Sheng jiajun.sheng@microfocus.com Joseph Stevens thejosephstevens@gmail.com Joshua Roppo joshroppo@gmail.com Juan Jimenez-Anca cortopy@users.noreply.github.com @@ -301,10 +318,12 @@ Konstantin Aksenov konstantin.aksenov@flant.com Kornilios Kourtis kornilios@isovalent.com Laurent Bernaille laurent.bernaille@datadoghq.com Lehner Florian dev@der-flo.net +Leonard Cohnen lc@edgeless.systems leonliao xiaobo.liao@gmail.com Liang Zhou zhoul110@chinatelecom.cn Li Chengyuan chengyuanli@hotmail.com LiHui andrewli@yunify.com +Lin Dong lindongld@google.com Lin Sun lin.sun@solo.io Lior Rozen liorr@tailorbrands.com Liu Qun qunliu@zyhx-group.com @@ -343,11 +362,13 @@ Marga Manterola marga@isovalent.com Mario Constanti mario@constanti.de Marius Gerling marius.gerling@uniberg.com Mark deVilliers markdevilliers@gmail.com +Mark Pashmfouroush mark@isovalent.com Markus Blaschke mblaschke82@gmail.com 
Martin Charles martincharles07@gmail.com Martin Koppehel martin.koppehel@st.ovgu.de Martin Odstrcilik martin.odstrcilik@gmail.com Martynas Pumputis m@lambda.lt +Marvin Gaube dev@marvingaube.de Marwin Baumann 56264798+marwinbaumannsbp@users.noreply.github.com Matej Gera matejgera@gmail.com Mathias Herzog mathu@gmx.ch @@ -358,6 +379,7 @@ Matthew Fenwick mfenwick100@gmail.com Matthew Gumport me@gum.pt Matthieu Antoine matthieu.antoine@jumo.world Matt Layher mdlayher@gmail.com +Matyáš Kroupa kroupa.matyas@gmail.com Mauricio Vásquez mauricio@kinvolk.io Maxime Brunet max@brnt.mx Maxime Visonneau maxime.visonneau@gmail.com @@ -382,6 +404,7 @@ MikeLing sabergeass@gmail.com Mitch Hulscher mitch.hulscher@lib.io Moh Ahmed moh.ahmed@cengn.ca Mohammad Yosefpor 47300215+m-yosefpor@users.noreply.github.com +Mohit Marathe mohitmarathe23@gmail.com Moritz Eckert m1gh7ym0@gmail.com Moritz Johner beller.moritz@googlemail.com Moshe Immerman moshe.immerman@vitalitygroup.com @@ -414,6 +437,7 @@ Odin Ugedal ougedal@palantir.com Oilbeater mengxin@alauda.io Oksana Baranova oksana.baranova@intel.com Ole Markus With o.with@sportradar.com +Oliver Hofmann 91730056+olinux-dev@users.noreply.github.com Oliver Ni oliver.ni@gmail.com Oliver Wang a0924100192@gmail.com Omar Aloraini ooraini.dev@gmail.com @@ -431,7 +455,9 @@ Paulo Gomes pjbgf@linux.com Pavel Pavlov 40396270+PavelPavlov46@users.noreply.github.com Paweł Prażak pawelprazak@users.noreply.github.com Peiqi Shi uestc.shi@gmail.com +pengbinbin1 pengbiny@163.com Pengfei Song pengfei.song@daocloud.io +Peter Jausovec peter.jausovec@solo.io Peter Slovak slovak.peto@gmail.com Philippe Lafoucrière philippe.lafoucriere@gmail.com Philipp Gniewosz philipp.gniewosz@daimlertruck.com @@ -452,7 +478,7 @@ Rahul Joshi rkjoshi@google.com Rajat Jindal rajatjindal83@gmail.com Raphael Campos raphael@accuknox.com Raphaël Pinson raphael@isovalent.com -Rastislav Szabo rastislav@kubermatic.com +Rastislav Szabo rastislav.szabo@isovalent.com Ray Bejjani ray.bejjani@gmail.com Raymond de Jong raymond.dejong@isovalent.com Reilly Brogan reilly@reillybrogan.com @@ -521,6 +547,7 @@ Sugang Li sugangli@google.com Sven Haardiek sven.haardiek@uni-muenster.de Swaminathan Vasudevan svasudevan@suse.com Taeung Song treeze.taeung@gmail.com +Takayoshi Nishida takayoshi.nishida@gmail.com Tam Mach tam.mach@cilium.io Tasdik Rahman prodicus@outlook.com Te-Yu Chang dale.teyuchang@gmail.com @@ -549,6 +576,7 @@ Tom Payne twpayne@gmail.com Tony Lambiris tony@criticalstack.com Tony Lu tonylu@linux.alibaba.com Tony Norlin tony.norlin@localdomain.se +Tore S. 
Loenoey tore.lonoy@gmail.com Travis Glenn Hansen travisghansen@yahoo.com Trevor Roberts Jr Trevor.Roberts.Jr@gmail.com Trevor Tao trevor.tao@arm.com @@ -560,9 +588,9 @@ Vigneshwaren Sunder vickymailed@gmail.com Viktor Kuzmin kvaster@gmail.com Viktor Oreshkin imselfish@stek29.rocks Ville Ojamo bluikko@users.noreply.github.com -Vincent Li v.li@f5.com +Vincent Li vincent.mc.li@gmail.com Vipul Singh vipul21sept@gmail.com -Vishal Choudhary contactvishaltech@gmail.com +Vishal Choudhary sendtovishalchoudhary@gmail.com Vishnu Soman K vishnusomank05@gmail.com Vlad Artamonov 742047+vladdy@users.noreply.github.com Vlad Gorodetsky v@gor.io @@ -611,6 +639,7 @@ Zang Li zangli@google.com zhanghe9702 zhanghe9702@163.com Zhang Qiang qiangzhang@qiyi.com Zhiyuan Hou zhiyuan2048@linux.alibaba.com +zhouhaibing089 zhouhaibing089@gmail.com Zhu Yan hackzhuyan@gmail.com Zisis Lianas zl@consol.de diff --git a/vendor/github.com/cilium/cilium/api/v1/client/cilium_api_client.go b/vendor/github.com/cilium/cilium/api/v1/client/cilium_api_client.go deleted file mode 100644 index 04bc1154a..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/cilium_api_client.go +++ /dev/null @@ -1,150 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package client - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "github.com/go-openapi/runtime" - httptransport "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/client/daemon" - "github.com/cilium/cilium/api/v1/client/endpoint" - "github.com/cilium/cilium/api/v1/client/ipam" - "github.com/cilium/cilium/api/v1/client/metrics" - "github.com/cilium/cilium/api/v1/client/policy" - "github.com/cilium/cilium/api/v1/client/prefilter" - "github.com/cilium/cilium/api/v1/client/recorder" - "github.com/cilium/cilium/api/v1/client/service" -) - -// Default cilium API HTTP client. -var Default = NewHTTPClient(nil) - -const ( - // DefaultHost is the default Host - // found in Meta (info) section of spec file - DefaultHost string = "localhost" - // DefaultBasePath is the default BasePath - // found in Meta (info) section of spec file - DefaultBasePath string = "/v1" -) - -// DefaultSchemes are the default schemes found in Meta (info) section of spec file -var DefaultSchemes = []string{"http"} - -// NewHTTPClient creates a new cilium API HTTP client. -func NewHTTPClient(formats strfmt.Registry) *CiliumAPI { - return NewHTTPClientWithConfig(formats, nil) -} - -// NewHTTPClientWithConfig creates a new cilium API HTTP client, -// using a customizable transport config. 
-func NewHTTPClientWithConfig(formats strfmt.Registry, cfg *TransportConfig) *CiliumAPI { - // ensure nullable parameters have default - if cfg == nil { - cfg = DefaultTransportConfig() - } - - // create transport and client - transport := httptransport.New(cfg.Host, cfg.BasePath, cfg.Schemes) - return New(transport, formats) -} - -// New creates a new cilium API client -func New(transport runtime.ClientTransport, formats strfmt.Registry) *CiliumAPI { - // ensure nullable parameters have default - if formats == nil { - formats = strfmt.Default - } - - cli := new(CiliumAPI) - cli.Transport = transport - cli.Daemon = daemon.New(transport, formats) - cli.Endpoint = endpoint.New(transport, formats) - cli.Ipam = ipam.New(transport, formats) - cli.Metrics = metrics.New(transport, formats) - cli.Policy = policy.New(transport, formats) - cli.Prefilter = prefilter.New(transport, formats) - cli.Recorder = recorder.New(transport, formats) - cli.Service = service.New(transport, formats) - return cli -} - -// DefaultTransportConfig creates a TransportConfig with the -// default settings taken from the meta section of the spec file. -func DefaultTransportConfig() *TransportConfig { - return &TransportConfig{ - Host: DefaultHost, - BasePath: DefaultBasePath, - Schemes: DefaultSchemes, - } -} - -// TransportConfig contains the transport related info, -// found in the meta section of the spec file. -type TransportConfig struct { - Host string - BasePath string - Schemes []string -} - -// WithHost overrides the default host, -// provided by the meta section of the spec file. -func (cfg *TransportConfig) WithHost(host string) *TransportConfig { - cfg.Host = host - return cfg -} - -// WithBasePath overrides the default basePath, -// provided by the meta section of the spec file. -func (cfg *TransportConfig) WithBasePath(basePath string) *TransportConfig { - cfg.BasePath = basePath - return cfg -} - -// WithSchemes overrides the default schemes, -// provided by the meta section of the spec file. -func (cfg *TransportConfig) WithSchemes(schemes []string) *TransportConfig { - cfg.Schemes = schemes - return cfg -} - -// CiliumAPI is a client for cilium API -type CiliumAPI struct { - Daemon daemon.ClientService - - Endpoint endpoint.ClientService - - Ipam ipam.ClientService - - Metrics metrics.ClientService - - Policy policy.ClientService - - Prefilter prefilter.ClientService - - Recorder recorder.ClientService - - Service service.ClientService - - Transport runtime.ClientTransport -} - -// SetTransport changes the transport on the client and all its subresources -func (c *CiliumAPI) SetTransport(transport runtime.ClientTransport) { - c.Transport = transport - c.Daemon.SetTransport(transport) - c.Endpoint.SetTransport(transport) - c.Ipam.SetTransport(transport) - c.Metrics.SetTransport(transport) - c.Policy.SetTransport(transport) - c.Prefilter.SetTransport(transport) - c.Recorder.SetTransport(transport) - c.Service.SetTransport(transport) -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/daemon_client.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/daemon_client.go deleted file mode 100644 index 0f824d32b..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/daemon/daemon_client.go +++ /dev/null @@ -1,459 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package daemon - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" -) - -// New creates a new daemon API client. -func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService { - return &Client{transport: transport, formats: formats} -} - -/* -Client for daemon API -*/ -type Client struct { - transport runtime.ClientTransport - formats strfmt.Registry -} - -// ClientOption is the option for Client methods -type ClientOption func(*runtime.ClientOperation) - -// ClientService is the interface for Client methods -type ClientService interface { - GetCgroupDumpMetadata(params *GetCgroupDumpMetadataParams, opts ...ClientOption) (*GetCgroupDumpMetadataOK, error) - - GetClusterNodes(params *GetClusterNodesParams, opts ...ClientOption) (*GetClusterNodesOK, error) - - GetConfig(params *GetConfigParams, opts ...ClientOption) (*GetConfigOK, error) - - GetDebuginfo(params *GetDebuginfoParams, opts ...ClientOption) (*GetDebuginfoOK, error) - - GetHealthz(params *GetHealthzParams, opts ...ClientOption) (*GetHealthzOK, error) - - GetMap(params *GetMapParams, opts ...ClientOption) (*GetMapOK, error) - - GetMapName(params *GetMapNameParams, opts ...ClientOption) (*GetMapNameOK, error) - - GetMapNameEvents(params *GetMapNameEventsParams, writer io.Writer, opts ...ClientOption) (*GetMapNameEventsOK, error) - - GetNodeIds(params *GetNodeIdsParams, opts ...ClientOption) (*GetNodeIdsOK, error) - - PatchConfig(params *PatchConfigParams, opts ...ClientOption) (*PatchConfigOK, error) - - SetTransport(transport runtime.ClientTransport) -} - -/* -GetCgroupDumpMetadata retrieves cgroup metadata for all pods -*/ -func (a *Client) GetCgroupDumpMetadata(params *GetCgroupDumpMetadataParams, opts ...ClientOption) (*GetCgroupDumpMetadataOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewGetCgroupDumpMetadataParams() - } - op := &runtime.ClientOperation{ - ID: "GetCgroupDumpMetadata", - Method: "GET", - PathPattern: "/cgroup-dump-metadata", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &GetCgroupDumpMetadataReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*GetCgroupDumpMetadataOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for GetCgroupDumpMetadata: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) - panic(msg) -} - -/* -GetClusterNodes gets nodes information stored in the cilium agent -*/ -func (a *Client) GetClusterNodes(params *GetClusterNodesParams, opts ...ClientOption) (*GetClusterNodesOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewGetClusterNodesParams() - } - op := &runtime.ClientOperation{ - ID: "GetClusterNodes", - Method: "GET", - PathPattern: "/cluster/nodes", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &GetClusterNodesReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*GetClusterNodesOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for GetClusterNodes: API contract not enforced by server. Client expected to get an error, but got: %T", result) - panic(msg) -} - -/* -GetConfig gets configuration of cilium daemon - -Returns the configuration of the Cilium daemon. -*/ -func (a *Client) GetConfig(params *GetConfigParams, opts ...ClientOption) (*GetConfigOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewGetConfigParams() - } - op := &runtime.ClientOperation{ - ID: "GetConfig", - Method: "GET", - PathPattern: "/config", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &GetConfigReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*GetConfigOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for GetConfig: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) - panic(msg) -} - -/* -GetDebuginfo retrieves information about the agent and evironment for debugging -*/ -func (a *Client) GetDebuginfo(params *GetDebuginfoParams, opts ...ClientOption) (*GetDebuginfoOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewGetDebuginfoParams() - } - op := &runtime.ClientOperation{ - ID: "GetDebuginfo", - Method: "GET", - PathPattern: "/debuginfo", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &GetDebuginfoReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*GetDebuginfoOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for GetDebuginfo: API contract not enforced by server. Client expected to get an error, but got: %T", result) - panic(msg) -} - -/* - GetHealthz gets health of cilium daemon - - Returns health and status information of the Cilium daemon and related - -components such as the local container runtime, connected datastore, -Kubernetes integration and Hubble. -*/ -func (a *Client) GetHealthz(params *GetHealthzParams, opts ...ClientOption) (*GetHealthzOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewGetHealthzParams() - } - op := &runtime.ClientOperation{ - ID: "GetHealthz", - Method: "GET", - PathPattern: "/healthz", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &GetHealthzReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*GetHealthzOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for GetHealthz: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) - panic(msg) -} - -/* -GetMap lists all open maps -*/ -func (a *Client) GetMap(params *GetMapParams, opts ...ClientOption) (*GetMapOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewGetMapParams() - } - op := &runtime.ClientOperation{ - ID: "GetMap", - Method: "GET", - PathPattern: "/map", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &GetMapReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*GetMapOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for GetMap: API contract not enforced by server. Client expected to get an error, but got: %T", result) - panic(msg) -} - -/* -GetMapName retrieves contents of b p f map -*/ -func (a *Client) GetMapName(params *GetMapNameParams, opts ...ClientOption) (*GetMapNameOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewGetMapNameParams() - } - op := &runtime.ClientOperation{ - ID: "GetMapName", - Method: "GET", - PathPattern: "/map/{name}", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &GetMapNameReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*GetMapNameOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for GetMapName: API contract not enforced by server. Client expected to get an error, but got: %T", result) - panic(msg) -} - -/* -GetMapNameEvents retrieves the recent event logs associated with this endpoint -*/ -func (a *Client) GetMapNameEvents(params *GetMapNameEventsParams, writer io.Writer, opts ...ClientOption) (*GetMapNameEventsOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewGetMapNameEventsParams() - } - op := &runtime.ClientOperation{ - ID: "GetMapNameEvents", - Method: "GET", - PathPattern: "/map/{name}/events", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &GetMapNameEventsReader{formats: a.formats, writer: writer}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*GetMapNameEventsOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for GetMapNameEvents: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) - panic(msg) -} - -/* - GetNodeIds lists information about known node i ds - - Retrieves a list of node IDs allocated by the agent and their - -associated node IP addresses. -*/ -func (a *Client) GetNodeIds(params *GetNodeIdsParams, opts ...ClientOption) (*GetNodeIdsOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewGetNodeIdsParams() - } - op := &runtime.ClientOperation{ - ID: "GetNodeIds", - Method: "GET", - PathPattern: "/node/ids", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &GetNodeIdsReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*GetNodeIdsOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for GetNodeIds: API contract not enforced by server. Client expected to get an error, but got: %T", result) - panic(msg) -} - -/* - PatchConfig modifies daemon configuration - - Updates the daemon configuration by applying the provided - -ConfigurationMap and regenerates & recompiles all required datapath -components. -*/ -func (a *Client) PatchConfig(params *PatchConfigParams, opts ...ClientOption) (*PatchConfigOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewPatchConfigParams() - } - op := &runtime.ClientOperation{ - ID: "PatchConfig", - Method: "PATCH", - PathPattern: "/config", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &PatchConfigReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*PatchConfigOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for PatchConfig: API contract not enforced by server. Client expected to get an error, but got: %T", result) - panic(msg) -} - -// SetTransport changes the transport on the client -func (a *Client) SetTransport(transport runtime.ClientTransport) { - a.transport = transport -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_cgroup_dump_metadata_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_cgroup_dump_metadata_parameters.go deleted file mode 100644 index 70b51046c..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_cgroup_dump_metadata_parameters.go +++ /dev/null @@ -1,131 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package daemon - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" -) - -// NewGetCgroupDumpMetadataParams creates a new GetCgroupDumpMetadataParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewGetCgroupDumpMetadataParams() *GetCgroupDumpMetadataParams { - return &GetCgroupDumpMetadataParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewGetCgroupDumpMetadataParamsWithTimeout creates a new GetCgroupDumpMetadataParams object -// with the ability to set a timeout on a request. -func NewGetCgroupDumpMetadataParamsWithTimeout(timeout time.Duration) *GetCgroupDumpMetadataParams { - return &GetCgroupDumpMetadataParams{ - timeout: timeout, - } -} - -// NewGetCgroupDumpMetadataParamsWithContext creates a new GetCgroupDumpMetadataParams object -// with the ability to set a context for a request. -func NewGetCgroupDumpMetadataParamsWithContext(ctx context.Context) *GetCgroupDumpMetadataParams { - return &GetCgroupDumpMetadataParams{ - Context: ctx, - } -} - -// NewGetCgroupDumpMetadataParamsWithHTTPClient creates a new GetCgroupDumpMetadataParams object -// with the ability to set a custom HTTPClient for a request. -func NewGetCgroupDumpMetadataParamsWithHTTPClient(client *http.Client) *GetCgroupDumpMetadataParams { - return &GetCgroupDumpMetadataParams{ - HTTPClient: client, - } -} - -/* -GetCgroupDumpMetadataParams contains all the parameters to send to the API endpoint - - for the get cgroup dump metadata operation. - - Typically these are written to a http.Request. -*/ -type GetCgroupDumpMetadataParams struct { - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the get cgroup dump metadata params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetCgroupDumpMetadataParams) WithDefaults() *GetCgroupDumpMetadataParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the get cgroup dump metadata params (not the query body). -// -// All values with no default are reset to their zero value. 
-func (o *GetCgroupDumpMetadataParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the get cgroup dump metadata params -func (o *GetCgroupDumpMetadataParams) WithTimeout(timeout time.Duration) *GetCgroupDumpMetadataParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the get cgroup dump metadata params -func (o *GetCgroupDumpMetadataParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the get cgroup dump metadata params -func (o *GetCgroupDumpMetadataParams) WithContext(ctx context.Context) *GetCgroupDumpMetadataParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the get cgroup dump metadata params -func (o *GetCgroupDumpMetadataParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the get cgroup dump metadata params -func (o *GetCgroupDumpMetadataParams) WithHTTPClient(client *http.Client) *GetCgroupDumpMetadataParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the get cgroup dump metadata params -func (o *GetCgroupDumpMetadataParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WriteToRequest writes these params to a swagger request -func (o *GetCgroupDumpMetadataParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_cgroup_dump_metadata_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_cgroup_dump_metadata_responses.go deleted file mode 100644 index 3c16b3697..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_cgroup_dump_metadata_responses.go +++ /dev/null @@ -1,168 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package daemon - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// GetCgroupDumpMetadataReader is a Reader for the GetCgroupDumpMetadata structure. -type GetCgroupDumpMetadataReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. 
-func (o *GetCgroupDumpMetadataReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewGetCgroupDumpMetadataOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 500: - result := NewGetCgroupDumpMetadataFailure() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewGetCgroupDumpMetadataOK creates a GetCgroupDumpMetadataOK with default headers values -func NewGetCgroupDumpMetadataOK() *GetCgroupDumpMetadataOK { - return &GetCgroupDumpMetadataOK{} -} - -/* -GetCgroupDumpMetadataOK describes a response with status code 200, with default header values. - -Success -*/ -type GetCgroupDumpMetadataOK struct { - Payload *models.CgroupDumpMetadata -} - -// IsSuccess returns true when this get cgroup dump metadata o k response has a 2xx status code -func (o *GetCgroupDumpMetadataOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this get cgroup dump metadata o k response has a 3xx status code -func (o *GetCgroupDumpMetadataOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get cgroup dump metadata o k response has a 4xx status code -func (o *GetCgroupDumpMetadataOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this get cgroup dump metadata o k response has a 5xx status code -func (o *GetCgroupDumpMetadataOK) IsServerError() bool { - return false -} - -// IsCode returns true when this get cgroup dump metadata o k response a status code equal to that given -func (o *GetCgroupDumpMetadataOK) IsCode(code int) bool { - return code == 200 -} - -func (o *GetCgroupDumpMetadataOK) Error() string { - return fmt.Sprintf("[GET /cgroup-dump-metadata][%d] getCgroupDumpMetadataOK %+v", 200, o.Payload) -} - -func (o *GetCgroupDumpMetadataOK) String() string { - return fmt.Sprintf("[GET /cgroup-dump-metadata][%d] getCgroupDumpMetadataOK %+v", 200, o.Payload) -} - -func (o *GetCgroupDumpMetadataOK) GetPayload() *models.CgroupDumpMetadata { - return o.Payload -} - -func (o *GetCgroupDumpMetadataOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - o.Payload = new(models.CgroupDumpMetadata) - - // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewGetCgroupDumpMetadataFailure creates a GetCgroupDumpMetadataFailure with default headers values -func NewGetCgroupDumpMetadataFailure() *GetCgroupDumpMetadataFailure { - return &GetCgroupDumpMetadataFailure{} -} - -/* -GetCgroupDumpMetadataFailure describes a response with status code 500, with default header values. 
- -CgroupDumpMetadata get failed -*/ -type GetCgroupDumpMetadataFailure struct { - Payload models.Error -} - -// IsSuccess returns true when this get cgroup dump metadata failure response has a 2xx status code -func (o *GetCgroupDumpMetadataFailure) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this get cgroup dump metadata failure response has a 3xx status code -func (o *GetCgroupDumpMetadataFailure) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get cgroup dump metadata failure response has a 4xx status code -func (o *GetCgroupDumpMetadataFailure) IsClientError() bool { - return false -} - -// IsServerError returns true when this get cgroup dump metadata failure response has a 5xx status code -func (o *GetCgroupDumpMetadataFailure) IsServerError() bool { - return true -} - -// IsCode returns true when this get cgroup dump metadata failure response a status code equal to that given -func (o *GetCgroupDumpMetadataFailure) IsCode(code int) bool { - return code == 500 -} - -func (o *GetCgroupDumpMetadataFailure) Error() string { - return fmt.Sprintf("[GET /cgroup-dump-metadata][%d] getCgroupDumpMetadataFailure %+v", 500, o.Payload) -} - -func (o *GetCgroupDumpMetadataFailure) String() string { - return fmt.Sprintf("[GET /cgroup-dump-metadata][%d] getCgroupDumpMetadataFailure %+v", 500, o.Payload) -} - -func (o *GetCgroupDumpMetadataFailure) GetPayload() models.Error { - return o.Payload -} - -func (o *GetCgroupDumpMetadataFailure) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_cluster_nodes_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_cluster_nodes_parameters.go deleted file mode 100644 index f99962195..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_cluster_nodes_parameters.go +++ /dev/null @@ -1,161 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package daemon - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" -) - -// NewGetClusterNodesParams creates a new GetClusterNodesParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewGetClusterNodesParams() *GetClusterNodesParams { - return &GetClusterNodesParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewGetClusterNodesParamsWithTimeout creates a new GetClusterNodesParams object -// with the ability to set a timeout on a request. -func NewGetClusterNodesParamsWithTimeout(timeout time.Duration) *GetClusterNodesParams { - return &GetClusterNodesParams{ - timeout: timeout, - } -} - -// NewGetClusterNodesParamsWithContext creates a new GetClusterNodesParams object -// with the ability to set a context for a request. 
-func NewGetClusterNodesParamsWithContext(ctx context.Context) *GetClusterNodesParams { - return &GetClusterNodesParams{ - Context: ctx, - } -} - -// NewGetClusterNodesParamsWithHTTPClient creates a new GetClusterNodesParams object -// with the ability to set a custom HTTPClient for a request. -func NewGetClusterNodesParamsWithHTTPClient(client *http.Client) *GetClusterNodesParams { - return &GetClusterNodesParams{ - HTTPClient: client, - } -} - -/* -GetClusterNodesParams contains all the parameters to send to the API endpoint - - for the get cluster nodes operation. - - Typically these are written to a http.Request. -*/ -type GetClusterNodesParams struct { - - /* ClientID. - - Client UUID should be used when the client wants to request - a diff of nodes added and / or removed since the last time - that client has made a request. - - */ - ClientID *int64 - - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the get cluster nodes params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetClusterNodesParams) WithDefaults() *GetClusterNodesParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the get cluster nodes params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetClusterNodesParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the get cluster nodes params -func (o *GetClusterNodesParams) WithTimeout(timeout time.Duration) *GetClusterNodesParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the get cluster nodes params -func (o *GetClusterNodesParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the get cluster nodes params -func (o *GetClusterNodesParams) WithContext(ctx context.Context) *GetClusterNodesParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the get cluster nodes params -func (o *GetClusterNodesParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the get cluster nodes params -func (o *GetClusterNodesParams) WithHTTPClient(client *http.Client) *GetClusterNodesParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the get cluster nodes params -func (o *GetClusterNodesParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WithClientID adds the clientID to the get cluster nodes params -func (o *GetClusterNodesParams) WithClientID(clientID *int64) *GetClusterNodesParams { - o.SetClientID(clientID) - return o -} - -// SetClientID adds the clientId to the get cluster nodes params -func (o *GetClusterNodesParams) SetClientID(clientID *int64) { - o.ClientID = clientID -} - -// WriteToRequest writes these params to a swagger request -func (o *GetClusterNodesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - - if o.ClientID != nil { - - // header param client-id - if err := r.SetHeaderParam("client-id", swag.FormatInt64(*o.ClientID)); err != nil { - return err - } - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_cluster_nodes_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_cluster_nodes_responses.go deleted file mode 100644 index 2f767bbf5..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_cluster_nodes_responses.go +++ /dev/null @@ -1,101 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package daemon - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// GetClusterNodesReader is a Reader for the GetClusterNodes structure. -type GetClusterNodesReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. -func (o *GetClusterNodesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewGetClusterNodesOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewGetClusterNodesOK creates a GetClusterNodesOK with default headers values -func NewGetClusterNodesOK() *GetClusterNodesOK { - return &GetClusterNodesOK{} -} - -/* -GetClusterNodesOK describes a response with status code 200, with default header values. - -Success -*/ -type GetClusterNodesOK struct { - Payload *models.ClusterNodeStatus -} - -// IsSuccess returns true when this get cluster nodes o k response has a 2xx status code -func (o *GetClusterNodesOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this get cluster nodes o k response has a 3xx status code -func (o *GetClusterNodesOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get cluster nodes o k response has a 4xx status code -func (o *GetClusterNodesOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this get cluster nodes o k response has a 5xx status code -func (o *GetClusterNodesOK) IsServerError() bool { - return false -} - -// IsCode returns true when this get cluster nodes o k response a status code equal to that given -func (o *GetClusterNodesOK) IsCode(code int) bool { - return code == 200 -} - -func (o *GetClusterNodesOK) Error() string { - return fmt.Sprintf("[GET /cluster/nodes][%d] getClusterNodesOK %+v", 200, o.Payload) -} - -func (o *GetClusterNodesOK) String() string { - return fmt.Sprintf("[GET /cluster/nodes][%d] getClusterNodesOK %+v", 200, o.Payload) -} - -func (o *GetClusterNodesOK) GetPayload() *models.ClusterNodeStatus { - return o.Payload -} - -func (o *GetClusterNodesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - o.Payload = new(models.ClusterNodeStatus) - - // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_config_parameters.go 
b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_config_parameters.go deleted file mode 100644 index e741bd356..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_config_parameters.go +++ /dev/null @@ -1,131 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package daemon - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" -) - -// NewGetConfigParams creates a new GetConfigParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewGetConfigParams() *GetConfigParams { - return &GetConfigParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewGetConfigParamsWithTimeout creates a new GetConfigParams object -// with the ability to set a timeout on a request. -func NewGetConfigParamsWithTimeout(timeout time.Duration) *GetConfigParams { - return &GetConfigParams{ - timeout: timeout, - } -} - -// NewGetConfigParamsWithContext creates a new GetConfigParams object -// with the ability to set a context for a request. -func NewGetConfigParamsWithContext(ctx context.Context) *GetConfigParams { - return &GetConfigParams{ - Context: ctx, - } -} - -// NewGetConfigParamsWithHTTPClient creates a new GetConfigParams object -// with the ability to set a custom HTTPClient for a request. -func NewGetConfigParamsWithHTTPClient(client *http.Client) *GetConfigParams { - return &GetConfigParams{ - HTTPClient: client, - } -} - -/* -GetConfigParams contains all the parameters to send to the API endpoint - - for the get config operation. - - Typically these are written to a http.Request. -*/ -type GetConfigParams struct { - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the get config params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetConfigParams) WithDefaults() *GetConfigParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the get config params (not the query body). -// -// All values with no default are reset to their zero value. 
-func (o *GetConfigParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the get config params -func (o *GetConfigParams) WithTimeout(timeout time.Duration) *GetConfigParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the get config params -func (o *GetConfigParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the get config params -func (o *GetConfigParams) WithContext(ctx context.Context) *GetConfigParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the get config params -func (o *GetConfigParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the get config params -func (o *GetConfigParams) WithHTTPClient(client *http.Client) *GetConfigParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the get config params -func (o *GetConfigParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WriteToRequest writes these params to a swagger request -func (o *GetConfigParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_config_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_config_responses.go deleted file mode 100644 index 10da9ace5..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_config_responses.go +++ /dev/null @@ -1,101 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package daemon - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// GetConfigReader is a Reader for the GetConfig structure. -type GetConfigReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. -func (o *GetConfigReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewGetConfigOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewGetConfigOK creates a GetConfigOK with default headers values -func NewGetConfigOK() *GetConfigOK { - return &GetConfigOK{} -} - -/* -GetConfigOK describes a response with status code 200, with default header values. 
- -Success -*/ -type GetConfigOK struct { - Payload *models.DaemonConfiguration -} - -// IsSuccess returns true when this get config o k response has a 2xx status code -func (o *GetConfigOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this get config o k response has a 3xx status code -func (o *GetConfigOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get config o k response has a 4xx status code -func (o *GetConfigOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this get config o k response has a 5xx status code -func (o *GetConfigOK) IsServerError() bool { - return false -} - -// IsCode returns true when this get config o k response a status code equal to that given -func (o *GetConfigOK) IsCode(code int) bool { - return code == 200 -} - -func (o *GetConfigOK) Error() string { - return fmt.Sprintf("[GET /config][%d] getConfigOK %+v", 200, o.Payload) -} - -func (o *GetConfigOK) String() string { - return fmt.Sprintf("[GET /config][%d] getConfigOK %+v", 200, o.Payload) -} - -func (o *GetConfigOK) GetPayload() *models.DaemonConfiguration { - return o.Payload -} - -func (o *GetConfigOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - o.Payload = new(models.DaemonConfiguration) - - // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_debuginfo_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_debuginfo_parameters.go deleted file mode 100644 index 28eed7d3a..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_debuginfo_parameters.go +++ /dev/null @@ -1,131 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package daemon - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" -) - -// NewGetDebuginfoParams creates a new GetDebuginfoParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewGetDebuginfoParams() *GetDebuginfoParams { - return &GetDebuginfoParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewGetDebuginfoParamsWithTimeout creates a new GetDebuginfoParams object -// with the ability to set a timeout on a request. -func NewGetDebuginfoParamsWithTimeout(timeout time.Duration) *GetDebuginfoParams { - return &GetDebuginfoParams{ - timeout: timeout, - } -} - -// NewGetDebuginfoParamsWithContext creates a new GetDebuginfoParams object -// with the ability to set a context for a request. -func NewGetDebuginfoParamsWithContext(ctx context.Context) *GetDebuginfoParams { - return &GetDebuginfoParams{ - Context: ctx, - } -} - -// NewGetDebuginfoParamsWithHTTPClient creates a new GetDebuginfoParams object -// with the ability to set a custom HTTPClient for a request. 
-func NewGetDebuginfoParamsWithHTTPClient(client *http.Client) *GetDebuginfoParams { - return &GetDebuginfoParams{ - HTTPClient: client, - } -} - -/* -GetDebuginfoParams contains all the parameters to send to the API endpoint - - for the get debuginfo operation. - - Typically these are written to a http.Request. -*/ -type GetDebuginfoParams struct { - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the get debuginfo params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetDebuginfoParams) WithDefaults() *GetDebuginfoParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the get debuginfo params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetDebuginfoParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the get debuginfo params -func (o *GetDebuginfoParams) WithTimeout(timeout time.Duration) *GetDebuginfoParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the get debuginfo params -func (o *GetDebuginfoParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the get debuginfo params -func (o *GetDebuginfoParams) WithContext(ctx context.Context) *GetDebuginfoParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the get debuginfo params -func (o *GetDebuginfoParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the get debuginfo params -func (o *GetDebuginfoParams) WithHTTPClient(client *http.Client) *GetDebuginfoParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the get debuginfo params -func (o *GetDebuginfoParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WriteToRequest writes these params to a swagger request -func (o *GetDebuginfoParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_debuginfo_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_debuginfo_responses.go deleted file mode 100644 index a7fa07a31..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_debuginfo_responses.go +++ /dev/null @@ -1,168 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package daemon - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// GetDebuginfoReader is a Reader for the GetDebuginfo structure. -type GetDebuginfoReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. 
-func (o *GetDebuginfoReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewGetDebuginfoOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 500: - result := NewGetDebuginfoFailure() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewGetDebuginfoOK creates a GetDebuginfoOK with default headers values -func NewGetDebuginfoOK() *GetDebuginfoOK { - return &GetDebuginfoOK{} -} - -/* -GetDebuginfoOK describes a response with status code 200, with default header values. - -Success -*/ -type GetDebuginfoOK struct { - Payload *models.DebugInfo -} - -// IsSuccess returns true when this get debuginfo o k response has a 2xx status code -func (o *GetDebuginfoOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this get debuginfo o k response has a 3xx status code -func (o *GetDebuginfoOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get debuginfo o k response has a 4xx status code -func (o *GetDebuginfoOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this get debuginfo o k response has a 5xx status code -func (o *GetDebuginfoOK) IsServerError() bool { - return false -} - -// IsCode returns true when this get debuginfo o k response a status code equal to that given -func (o *GetDebuginfoOK) IsCode(code int) bool { - return code == 200 -} - -func (o *GetDebuginfoOK) Error() string { - return fmt.Sprintf("[GET /debuginfo][%d] getDebuginfoOK %+v", 200, o.Payload) -} - -func (o *GetDebuginfoOK) String() string { - return fmt.Sprintf("[GET /debuginfo][%d] getDebuginfoOK %+v", 200, o.Payload) -} - -func (o *GetDebuginfoOK) GetPayload() *models.DebugInfo { - return o.Payload -} - -func (o *GetDebuginfoOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - o.Payload = new(models.DebugInfo) - - // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewGetDebuginfoFailure creates a GetDebuginfoFailure with default headers values -func NewGetDebuginfoFailure() *GetDebuginfoFailure { - return &GetDebuginfoFailure{} -} - -/* -GetDebuginfoFailure describes a response with status code 500, with default header values. 
- -DebugInfo get failed -*/ -type GetDebuginfoFailure struct { - Payload models.Error -} - -// IsSuccess returns true when this get debuginfo failure response has a 2xx status code -func (o *GetDebuginfoFailure) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this get debuginfo failure response has a 3xx status code -func (o *GetDebuginfoFailure) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get debuginfo failure response has a 4xx status code -func (o *GetDebuginfoFailure) IsClientError() bool { - return false -} - -// IsServerError returns true when this get debuginfo failure response has a 5xx status code -func (o *GetDebuginfoFailure) IsServerError() bool { - return true -} - -// IsCode returns true when this get debuginfo failure response a status code equal to that given -func (o *GetDebuginfoFailure) IsCode(code int) bool { - return code == 500 -} - -func (o *GetDebuginfoFailure) Error() string { - return fmt.Sprintf("[GET /debuginfo][%d] getDebuginfoFailure %+v", 500, o.Payload) -} - -func (o *GetDebuginfoFailure) String() string { - return fmt.Sprintf("[GET /debuginfo][%d] getDebuginfoFailure %+v", 500, o.Payload) -} - -func (o *GetDebuginfoFailure) GetPayload() models.Error { - return o.Payload -} - -func (o *GetDebuginfoFailure) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_healthz_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_healthz_parameters.go deleted file mode 100644 index 235c69da3..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_healthz_parameters.go +++ /dev/null @@ -1,159 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package daemon - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" -) - -// NewGetHealthzParams creates a new GetHealthzParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewGetHealthzParams() *GetHealthzParams { - return &GetHealthzParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewGetHealthzParamsWithTimeout creates a new GetHealthzParams object -// with the ability to set a timeout on a request. -func NewGetHealthzParamsWithTimeout(timeout time.Duration) *GetHealthzParams { - return &GetHealthzParams{ - timeout: timeout, - } -} - -// NewGetHealthzParamsWithContext creates a new GetHealthzParams object -// with the ability to set a context for a request. -func NewGetHealthzParamsWithContext(ctx context.Context) *GetHealthzParams { - return &GetHealthzParams{ - Context: ctx, - } -} - -// NewGetHealthzParamsWithHTTPClient creates a new GetHealthzParams object -// with the ability to set a custom HTTPClient for a request. 
-func NewGetHealthzParamsWithHTTPClient(client *http.Client) *GetHealthzParams { - return &GetHealthzParams{ - HTTPClient: client, - } -} - -/* -GetHealthzParams contains all the parameters to send to the API endpoint - - for the get healthz operation. - - Typically these are written to a http.Request. -*/ -type GetHealthzParams struct { - - /* Brief. - - Brief will return a brief representation of the Cilium status. - - */ - Brief *bool - - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the get healthz params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetHealthzParams) WithDefaults() *GetHealthzParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the get healthz params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetHealthzParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the get healthz params -func (o *GetHealthzParams) WithTimeout(timeout time.Duration) *GetHealthzParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the get healthz params -func (o *GetHealthzParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the get healthz params -func (o *GetHealthzParams) WithContext(ctx context.Context) *GetHealthzParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the get healthz params -func (o *GetHealthzParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the get healthz params -func (o *GetHealthzParams) WithHTTPClient(client *http.Client) *GetHealthzParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the get healthz params -func (o *GetHealthzParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WithBrief adds the brief to the get healthz params -func (o *GetHealthzParams) WithBrief(brief *bool) *GetHealthzParams { - o.SetBrief(brief) - return o -} - -// SetBrief adds the brief to the get healthz params -func (o *GetHealthzParams) SetBrief(brief *bool) { - o.Brief = brief -} - -// WriteToRequest writes these params to a swagger request -func (o *GetHealthzParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - - if o.Brief != nil { - - // header param brief - if err := r.SetHeaderParam("brief", swag.FormatBool(*o.Brief)); err != nil { - return err - } - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_healthz_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_healthz_responses.go deleted file mode 100644 index c3b73feea..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_healthz_responses.go +++ /dev/null @@ -1,101 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package daemon - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// GetHealthzReader is a Reader for the GetHealthz structure. -type GetHealthzReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. -func (o *GetHealthzReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewGetHealthzOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewGetHealthzOK creates a GetHealthzOK with default headers values -func NewGetHealthzOK() *GetHealthzOK { - return &GetHealthzOK{} -} - -/* -GetHealthzOK describes a response with status code 200, with default header values. - -Success -*/ -type GetHealthzOK struct { - Payload *models.StatusResponse -} - -// IsSuccess returns true when this get healthz o k response has a 2xx status code -func (o *GetHealthzOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this get healthz o k response has a 3xx status code -func (o *GetHealthzOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get healthz o k response has a 4xx status code -func (o *GetHealthzOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this get healthz o k response has a 5xx status code -func (o *GetHealthzOK) IsServerError() bool { - return false -} - -// IsCode returns true when this get healthz o k response a status code equal to that given -func (o *GetHealthzOK) IsCode(code int) bool { - return code == 200 -} - -func (o *GetHealthzOK) Error() string { - return fmt.Sprintf("[GET /healthz][%d] getHealthzOK %+v", 200, o.Payload) -} - -func (o *GetHealthzOK) String() string { - return fmt.Sprintf("[GET /healthz][%d] getHealthzOK %+v", 200, o.Payload) -} - -func (o *GetHealthzOK) GetPayload() *models.StatusResponse { - return o.Payload -} - -func (o *GetHealthzOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - o.Payload = new(models.StatusResponse) - - // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_name_events_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_name_events_parameters.go deleted file mode 100644 index 41520f507..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_name_events_parameters.go +++ /dev/null @@ -1,189 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package daemon - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" -) - -// NewGetMapNameEventsParams creates a new GetMapNameEventsParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewGetMapNameEventsParams() *GetMapNameEventsParams { - return &GetMapNameEventsParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewGetMapNameEventsParamsWithTimeout creates a new GetMapNameEventsParams object -// with the ability to set a timeout on a request. -func NewGetMapNameEventsParamsWithTimeout(timeout time.Duration) *GetMapNameEventsParams { - return &GetMapNameEventsParams{ - timeout: timeout, - } -} - -// NewGetMapNameEventsParamsWithContext creates a new GetMapNameEventsParams object -// with the ability to set a context for a request. -func NewGetMapNameEventsParamsWithContext(ctx context.Context) *GetMapNameEventsParams { - return &GetMapNameEventsParams{ - Context: ctx, - } -} - -// NewGetMapNameEventsParamsWithHTTPClient creates a new GetMapNameEventsParams object -// with the ability to set a custom HTTPClient for a request. -func NewGetMapNameEventsParamsWithHTTPClient(client *http.Client) *GetMapNameEventsParams { - return &GetMapNameEventsParams{ - HTTPClient: client, - } -} - -/* -GetMapNameEventsParams contains all the parameters to send to the API endpoint - - for the get map name events operation. - - Typically these are written to a http.Request. -*/ -type GetMapNameEventsParams struct { - - /* Follow. - - Whether to follow streamed requests - */ - Follow *bool - - /* Name. - - Name of map - */ - Name string - - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the get map name events params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetMapNameEventsParams) WithDefaults() *GetMapNameEventsParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the get map name events params (not the query body). -// -// All values with no default are reset to their zero value. 
-func (o *GetMapNameEventsParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the get map name events params -func (o *GetMapNameEventsParams) WithTimeout(timeout time.Duration) *GetMapNameEventsParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the get map name events params -func (o *GetMapNameEventsParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the get map name events params -func (o *GetMapNameEventsParams) WithContext(ctx context.Context) *GetMapNameEventsParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the get map name events params -func (o *GetMapNameEventsParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the get map name events params -func (o *GetMapNameEventsParams) WithHTTPClient(client *http.Client) *GetMapNameEventsParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the get map name events params -func (o *GetMapNameEventsParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WithFollow adds the follow to the get map name events params -func (o *GetMapNameEventsParams) WithFollow(follow *bool) *GetMapNameEventsParams { - o.SetFollow(follow) - return o -} - -// SetFollow adds the follow to the get map name events params -func (o *GetMapNameEventsParams) SetFollow(follow *bool) { - o.Follow = follow -} - -// WithName adds the name to the get map name events params -func (o *GetMapNameEventsParams) WithName(name string) *GetMapNameEventsParams { - o.SetName(name) - return o -} - -// SetName adds the name to the get map name events params -func (o *GetMapNameEventsParams) SetName(name string) { - o.Name = name -} - -// WriteToRequest writes these params to a swagger request -func (o *GetMapNameEventsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - - if o.Follow != nil { - - // query param follow - var qrFollow bool - - if o.Follow != nil { - qrFollow = *o.Follow - } - qFollow := swag.FormatBool(qrFollow) - if qFollow != "" { - - if err := r.SetQueryParam("follow", qFollow); err != nil { - return err - } - } - } - - // path param name - if err := r.SetPathParam("name", o.Name); err != nil { - return err - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_name_events_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_name_events_responses.go deleted file mode 100644 index 6dc187478..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_name_events_responses.go +++ /dev/null @@ -1,158 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package daemon - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" -) - -// GetMapNameEventsReader is a Reader for the GetMapNameEvents structure. -type GetMapNameEventsReader struct { - formats strfmt.Registry - writer io.Writer -} - -// ReadResponse reads a server response into the received o. 
-func (o *GetMapNameEventsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewGetMapNameEventsOK(o.writer) - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 404: - result := NewGetMapNameEventsNotFound() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewGetMapNameEventsOK creates a GetMapNameEventsOK with default headers values -func NewGetMapNameEventsOK(writer io.Writer) *GetMapNameEventsOK { - return &GetMapNameEventsOK{ - - Payload: writer, - } -} - -/* -GetMapNameEventsOK describes a response with status code 200, with default header values. - -Success -*/ -type GetMapNameEventsOK struct { - Payload io.Writer -} - -// IsSuccess returns true when this get map name events o k response has a 2xx status code -func (o *GetMapNameEventsOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this get map name events o k response has a 3xx status code -func (o *GetMapNameEventsOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get map name events o k response has a 4xx status code -func (o *GetMapNameEventsOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this get map name events o k response has a 5xx status code -func (o *GetMapNameEventsOK) IsServerError() bool { - return false -} - -// IsCode returns true when this get map name events o k response a status code equal to that given -func (o *GetMapNameEventsOK) IsCode(code int) bool { - return code == 200 -} - -func (o *GetMapNameEventsOK) Error() string { - return fmt.Sprintf("[GET /map/{name}/events][%d] getMapNameEventsOK %+v", 200, o.Payload) -} - -func (o *GetMapNameEventsOK) String() string { - return fmt.Sprintf("[GET /map/{name}/events][%d] getMapNameEventsOK %+v", 200, o.Payload) -} - -func (o *GetMapNameEventsOK) GetPayload() io.Writer { - return o.Payload -} - -func (o *GetMapNameEventsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewGetMapNameEventsNotFound creates a GetMapNameEventsNotFound with default headers values -func NewGetMapNameEventsNotFound() *GetMapNameEventsNotFound { - return &GetMapNameEventsNotFound{} -} - -/* -GetMapNameEventsNotFound describes a response with status code 404, with default header values. 
- -Map not found -*/ -type GetMapNameEventsNotFound struct { -} - -// IsSuccess returns true when this get map name events not found response has a 2xx status code -func (o *GetMapNameEventsNotFound) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this get map name events not found response has a 3xx status code -func (o *GetMapNameEventsNotFound) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get map name events not found response has a 4xx status code -func (o *GetMapNameEventsNotFound) IsClientError() bool { - return true -} - -// IsServerError returns true when this get map name events not found response has a 5xx status code -func (o *GetMapNameEventsNotFound) IsServerError() bool { - return false -} - -// IsCode returns true when this get map name events not found response a status code equal to that given -func (o *GetMapNameEventsNotFound) IsCode(code int) bool { - return code == 404 -} - -func (o *GetMapNameEventsNotFound) Error() string { - return fmt.Sprintf("[GET /map/{name}/events][%d] getMapNameEventsNotFound ", 404) -} - -func (o *GetMapNameEventsNotFound) String() string { - return fmt.Sprintf("[GET /map/{name}/events][%d] getMapNameEventsNotFound ", 404) -} - -func (o *GetMapNameEventsNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_name_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_name_parameters.go deleted file mode 100644 index 9465ac2bb..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_name_parameters.go +++ /dev/null @@ -1,154 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package daemon - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" -) - -// NewGetMapNameParams creates a new GetMapNameParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewGetMapNameParams() *GetMapNameParams { - return &GetMapNameParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewGetMapNameParamsWithTimeout creates a new GetMapNameParams object -// with the ability to set a timeout on a request. -func NewGetMapNameParamsWithTimeout(timeout time.Duration) *GetMapNameParams { - return &GetMapNameParams{ - timeout: timeout, - } -} - -// NewGetMapNameParamsWithContext creates a new GetMapNameParams object -// with the ability to set a context for a request. -func NewGetMapNameParamsWithContext(ctx context.Context) *GetMapNameParams { - return &GetMapNameParams{ - Context: ctx, - } -} - -// NewGetMapNameParamsWithHTTPClient creates a new GetMapNameParams object -// with the ability to set a custom HTTPClient for a request. -func NewGetMapNameParamsWithHTTPClient(client *http.Client) *GetMapNameParams { - return &GetMapNameParams{ - HTTPClient: client, - } -} - -/* -GetMapNameParams contains all the parameters to send to the API endpoint - - for the get map name operation. 
- - Typically these are written to a http.Request. -*/ -type GetMapNameParams struct { - - /* Name. - - Name of map - */ - Name string - - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the get map name params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetMapNameParams) WithDefaults() *GetMapNameParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the get map name params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetMapNameParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the get map name params -func (o *GetMapNameParams) WithTimeout(timeout time.Duration) *GetMapNameParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the get map name params -func (o *GetMapNameParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the get map name params -func (o *GetMapNameParams) WithContext(ctx context.Context) *GetMapNameParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the get map name params -func (o *GetMapNameParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the get map name params -func (o *GetMapNameParams) WithHTTPClient(client *http.Client) *GetMapNameParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the get map name params -func (o *GetMapNameParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WithName adds the name to the get map name params -func (o *GetMapNameParams) WithName(name string) *GetMapNameParams { - o.SetName(name) - return o -} - -// SetName adds the name to the get map name params -func (o *GetMapNameParams) SetName(name string) { - o.Name = name -} - -// WriteToRequest writes these params to a swagger request -func (o *GetMapNameParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - - // path param name - if err := r.SetPathParam("name", o.Name); err != nil { - return err - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_name_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_name_responses.go deleted file mode 100644 index f03de4ce3..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_name_responses.go +++ /dev/null @@ -1,158 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package daemon - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// GetMapNameReader is a Reader for the GetMapName structure. -type GetMapNameReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. 
-func (o *GetMapNameReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewGetMapNameOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 404: - result := NewGetMapNameNotFound() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewGetMapNameOK creates a GetMapNameOK with default headers values -func NewGetMapNameOK() *GetMapNameOK { - return &GetMapNameOK{} -} - -/* -GetMapNameOK describes a response with status code 200, with default header values. - -Success -*/ -type GetMapNameOK struct { - Payload *models.BPFMap -} - -// IsSuccess returns true when this get map name o k response has a 2xx status code -func (o *GetMapNameOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this get map name o k response has a 3xx status code -func (o *GetMapNameOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get map name o k response has a 4xx status code -func (o *GetMapNameOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this get map name o k response has a 5xx status code -func (o *GetMapNameOK) IsServerError() bool { - return false -} - -// IsCode returns true when this get map name o k response a status code equal to that given -func (o *GetMapNameOK) IsCode(code int) bool { - return code == 200 -} - -func (o *GetMapNameOK) Error() string { - return fmt.Sprintf("[GET /map/{name}][%d] getMapNameOK %+v", 200, o.Payload) -} - -func (o *GetMapNameOK) String() string { - return fmt.Sprintf("[GET /map/{name}][%d] getMapNameOK %+v", 200, o.Payload) -} - -func (o *GetMapNameOK) GetPayload() *models.BPFMap { - return o.Payload -} - -func (o *GetMapNameOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - o.Payload = new(models.BPFMap) - - // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewGetMapNameNotFound creates a GetMapNameNotFound with default headers values -func NewGetMapNameNotFound() *GetMapNameNotFound { - return &GetMapNameNotFound{} -} - -/* -GetMapNameNotFound describes a response with status code 404, with default header values. 
- -Map not found -*/ -type GetMapNameNotFound struct { -} - -// IsSuccess returns true when this get map name not found response has a 2xx status code -func (o *GetMapNameNotFound) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this get map name not found response has a 3xx status code -func (o *GetMapNameNotFound) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get map name not found response has a 4xx status code -func (o *GetMapNameNotFound) IsClientError() bool { - return true -} - -// IsServerError returns true when this get map name not found response has a 5xx status code -func (o *GetMapNameNotFound) IsServerError() bool { - return false -} - -// IsCode returns true when this get map name not found response a status code equal to that given -func (o *GetMapNameNotFound) IsCode(code int) bool { - return code == 404 -} - -func (o *GetMapNameNotFound) Error() string { - return fmt.Sprintf("[GET /map/{name}][%d] getMapNameNotFound ", 404) -} - -func (o *GetMapNameNotFound) String() string { - return fmt.Sprintf("[GET /map/{name}][%d] getMapNameNotFound ", 404) -} - -func (o *GetMapNameNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_parameters.go deleted file mode 100644 index 05cca29d7..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_parameters.go +++ /dev/null @@ -1,131 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package daemon - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" -) - -// NewGetMapParams creates a new GetMapParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewGetMapParams() *GetMapParams { - return &GetMapParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewGetMapParamsWithTimeout creates a new GetMapParams object -// with the ability to set a timeout on a request. -func NewGetMapParamsWithTimeout(timeout time.Duration) *GetMapParams { - return &GetMapParams{ - timeout: timeout, - } -} - -// NewGetMapParamsWithContext creates a new GetMapParams object -// with the ability to set a context for a request. -func NewGetMapParamsWithContext(ctx context.Context) *GetMapParams { - return &GetMapParams{ - Context: ctx, - } -} - -// NewGetMapParamsWithHTTPClient creates a new GetMapParams object -// with the ability to set a custom HTTPClient for a request. -func NewGetMapParamsWithHTTPClient(client *http.Client) *GetMapParams { - return &GetMapParams{ - HTTPClient: client, - } -} - -/* -GetMapParams contains all the parameters to send to the API endpoint - - for the get map operation. - - Typically these are written to a http.Request. 
-*/ -type GetMapParams struct { - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the get map params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetMapParams) WithDefaults() *GetMapParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the get map params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetMapParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the get map params -func (o *GetMapParams) WithTimeout(timeout time.Duration) *GetMapParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the get map params -func (o *GetMapParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the get map params -func (o *GetMapParams) WithContext(ctx context.Context) *GetMapParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the get map params -func (o *GetMapParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the get map params -func (o *GetMapParams) WithHTTPClient(client *http.Client) *GetMapParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the get map params -func (o *GetMapParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WriteToRequest writes these params to a swagger request -func (o *GetMapParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_responses.go deleted file mode 100644 index dae2f2cf5..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_map_responses.go +++ /dev/null @@ -1,101 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package daemon - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// GetMapReader is a Reader for the GetMap structure. -type GetMapReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. -func (o *GetMapReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewGetMapOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewGetMapOK creates a GetMapOK with default headers values -func NewGetMapOK() *GetMapOK { - return &GetMapOK{} -} - -/* -GetMapOK describes a response with status code 200, with default header values. 
- -Success -*/ -type GetMapOK struct { - Payload *models.BPFMapList -} - -// IsSuccess returns true when this get map o k response has a 2xx status code -func (o *GetMapOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this get map o k response has a 3xx status code -func (o *GetMapOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get map o k response has a 4xx status code -func (o *GetMapOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this get map o k response has a 5xx status code -func (o *GetMapOK) IsServerError() bool { - return false -} - -// IsCode returns true when this get map o k response a status code equal to that given -func (o *GetMapOK) IsCode(code int) bool { - return code == 200 -} - -func (o *GetMapOK) Error() string { - return fmt.Sprintf("[GET /map][%d] getMapOK %+v", 200, o.Payload) -} - -func (o *GetMapOK) String() string { - return fmt.Sprintf("[GET /map][%d] getMapOK %+v", 200, o.Payload) -} - -func (o *GetMapOK) GetPayload() *models.BPFMapList { - return o.Payload -} - -func (o *GetMapOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - o.Payload = new(models.BPFMapList) - - // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_node_ids_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_node_ids_parameters.go deleted file mode 100644 index 19fce1163..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_node_ids_parameters.go +++ /dev/null @@ -1,131 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package daemon - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" -) - -// NewGetNodeIdsParams creates a new GetNodeIdsParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewGetNodeIdsParams() *GetNodeIdsParams { - return &GetNodeIdsParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewGetNodeIdsParamsWithTimeout creates a new GetNodeIdsParams object -// with the ability to set a timeout on a request. -func NewGetNodeIdsParamsWithTimeout(timeout time.Duration) *GetNodeIdsParams { - return &GetNodeIdsParams{ - timeout: timeout, - } -} - -// NewGetNodeIdsParamsWithContext creates a new GetNodeIdsParams object -// with the ability to set a context for a request. -func NewGetNodeIdsParamsWithContext(ctx context.Context) *GetNodeIdsParams { - return &GetNodeIdsParams{ - Context: ctx, - } -} - -// NewGetNodeIdsParamsWithHTTPClient creates a new GetNodeIdsParams object -// with the ability to set a custom HTTPClient for a request. -func NewGetNodeIdsParamsWithHTTPClient(client *http.Client) *GetNodeIdsParams { - return &GetNodeIdsParams{ - HTTPClient: client, - } -} - -/* -GetNodeIdsParams contains all the parameters to send to the API endpoint - - for the get node ids operation. 
- - Typically these are written to a http.Request. -*/ -type GetNodeIdsParams struct { - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the get node ids params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetNodeIdsParams) WithDefaults() *GetNodeIdsParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the get node ids params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetNodeIdsParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the get node ids params -func (o *GetNodeIdsParams) WithTimeout(timeout time.Duration) *GetNodeIdsParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the get node ids params -func (o *GetNodeIdsParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the get node ids params -func (o *GetNodeIdsParams) WithContext(ctx context.Context) *GetNodeIdsParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the get node ids params -func (o *GetNodeIdsParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the get node ids params -func (o *GetNodeIdsParams) WithHTTPClient(client *http.Client) *GetNodeIdsParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the get node ids params -func (o *GetNodeIdsParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WriteToRequest writes these params to a swagger request -func (o *GetNodeIdsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_node_ids_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_node_ids_responses.go deleted file mode 100644 index 1b4131485..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/daemon/get_node_ids_responses.go +++ /dev/null @@ -1,99 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package daemon - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// GetNodeIdsReader is a Reader for the GetNodeIds structure. -type GetNodeIdsReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. 
-func (o *GetNodeIdsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewGetNodeIdsOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewGetNodeIdsOK creates a GetNodeIdsOK with default headers values -func NewGetNodeIdsOK() *GetNodeIdsOK { - return &GetNodeIdsOK{} -} - -/* -GetNodeIdsOK describes a response with status code 200, with default header values. - -Success -*/ -type GetNodeIdsOK struct { - Payload []*models.NodeID -} - -// IsSuccess returns true when this get node ids o k response has a 2xx status code -func (o *GetNodeIdsOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this get node ids o k response has a 3xx status code -func (o *GetNodeIdsOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get node ids o k response has a 4xx status code -func (o *GetNodeIdsOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this get node ids o k response has a 5xx status code -func (o *GetNodeIdsOK) IsServerError() bool { - return false -} - -// IsCode returns true when this get node ids o k response a status code equal to that given -func (o *GetNodeIdsOK) IsCode(code int) bool { - return code == 200 -} - -func (o *GetNodeIdsOK) Error() string { - return fmt.Sprintf("[GET /node/ids][%d] getNodeIdsOK %+v", 200, o.Payload) -} - -func (o *GetNodeIdsOK) String() string { - return fmt.Sprintf("[GET /node/ids][%d] getNodeIdsOK %+v", 200, o.Payload) -} - -func (o *GetNodeIdsOK) GetPayload() []*models.NodeID { - return o.Payload -} - -func (o *GetNodeIdsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/patch_config_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/patch_config_parameters.go deleted file mode 100644 index a95be301c..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/daemon/patch_config_parameters.go +++ /dev/null @@ -1,153 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package daemon - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// NewPatchConfigParams creates a new PatchConfigParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. 
-func NewPatchConfigParams() *PatchConfigParams { - return &PatchConfigParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewPatchConfigParamsWithTimeout creates a new PatchConfigParams object -// with the ability to set a timeout on a request. -func NewPatchConfigParamsWithTimeout(timeout time.Duration) *PatchConfigParams { - return &PatchConfigParams{ - timeout: timeout, - } -} - -// NewPatchConfigParamsWithContext creates a new PatchConfigParams object -// with the ability to set a context for a request. -func NewPatchConfigParamsWithContext(ctx context.Context) *PatchConfigParams { - return &PatchConfigParams{ - Context: ctx, - } -} - -// NewPatchConfigParamsWithHTTPClient creates a new PatchConfigParams object -// with the ability to set a custom HTTPClient for a request. -func NewPatchConfigParamsWithHTTPClient(client *http.Client) *PatchConfigParams { - return &PatchConfigParams{ - HTTPClient: client, - } -} - -/* -PatchConfigParams contains all the parameters to send to the API endpoint - - for the patch config operation. - - Typically these are written to a http.Request. -*/ -type PatchConfigParams struct { - - // Configuration. - Configuration *models.DaemonConfigurationSpec - - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the patch config params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *PatchConfigParams) WithDefaults() *PatchConfigParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the patch config params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *PatchConfigParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the patch config params -func (o *PatchConfigParams) WithTimeout(timeout time.Duration) *PatchConfigParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the patch config params -func (o *PatchConfigParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the patch config params -func (o *PatchConfigParams) WithContext(ctx context.Context) *PatchConfigParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the patch config params -func (o *PatchConfigParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the patch config params -func (o *PatchConfigParams) WithHTTPClient(client *http.Client) *PatchConfigParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the patch config params -func (o *PatchConfigParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WithConfiguration adds the configuration to the patch config params -func (o *PatchConfigParams) WithConfiguration(configuration *models.DaemonConfigurationSpec) *PatchConfigParams { - o.SetConfiguration(configuration) - return o -} - -// SetConfiguration adds the configuration to the patch config params -func (o *PatchConfigParams) SetConfiguration(configuration *models.DaemonConfigurationSpec) { - o.Configuration = configuration -} - -// WriteToRequest writes these params to a swagger request -func (o *PatchConfigParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - if o.Configuration != nil { - if err := 
r.SetBodyParam(o.Configuration); err != nil { - return err - } - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/daemon/patch_config_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/daemon/patch_config_responses.go deleted file mode 100644 index a70a9cf33..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/daemon/patch_config_responses.go +++ /dev/null @@ -1,223 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package daemon - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// PatchConfigReader is a Reader for the PatchConfig structure. -type PatchConfigReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. -func (o *PatchConfigReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewPatchConfigOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 400: - result := NewPatchConfigBadRequest() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 500: - result := NewPatchConfigFailure() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewPatchConfigOK creates a PatchConfigOK with default headers values -func NewPatchConfigOK() *PatchConfigOK { - return &PatchConfigOK{} -} - -/* -PatchConfigOK describes a response with status code 200, with default header values. 
- -Success -*/ -type PatchConfigOK struct { -} - -// IsSuccess returns true when this patch config o k response has a 2xx status code -func (o *PatchConfigOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this patch config o k response has a 3xx status code -func (o *PatchConfigOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this patch config o k response has a 4xx status code -func (o *PatchConfigOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this patch config o k response has a 5xx status code -func (o *PatchConfigOK) IsServerError() bool { - return false -} - -// IsCode returns true when this patch config o k response a status code equal to that given -func (o *PatchConfigOK) IsCode(code int) bool { - return code == 200 -} - -func (o *PatchConfigOK) Error() string { - return fmt.Sprintf("[PATCH /config][%d] patchConfigOK ", 200) -} - -func (o *PatchConfigOK) String() string { - return fmt.Sprintf("[PATCH /config][%d] patchConfigOK ", 200) -} - -func (o *PatchConfigOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} - -// NewPatchConfigBadRequest creates a PatchConfigBadRequest with default headers values -func NewPatchConfigBadRequest() *PatchConfigBadRequest { - return &PatchConfigBadRequest{} -} - -/* -PatchConfigBadRequest describes a response with status code 400, with default header values. - -Bad configuration parameters -*/ -type PatchConfigBadRequest struct { - Payload models.Error -} - -// IsSuccess returns true when this patch config bad request response has a 2xx status code -func (o *PatchConfigBadRequest) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this patch config bad request response has a 3xx status code -func (o *PatchConfigBadRequest) IsRedirect() bool { - return false -} - -// IsClientError returns true when this patch config bad request response has a 4xx status code -func (o *PatchConfigBadRequest) IsClientError() bool { - return true -} - -// IsServerError returns true when this patch config bad request response has a 5xx status code -func (o *PatchConfigBadRequest) IsServerError() bool { - return false -} - -// IsCode returns true when this patch config bad request response a status code equal to that given -func (o *PatchConfigBadRequest) IsCode(code int) bool { - return code == 400 -} - -func (o *PatchConfigBadRequest) Error() string { - return fmt.Sprintf("[PATCH /config][%d] patchConfigBadRequest %+v", 400, o.Payload) -} - -func (o *PatchConfigBadRequest) String() string { - return fmt.Sprintf("[PATCH /config][%d] patchConfigBadRequest %+v", 400, o.Payload) -} - -func (o *PatchConfigBadRequest) GetPayload() models.Error { - return o.Payload -} - -func (o *PatchConfigBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewPatchConfigFailure creates a PatchConfigFailure with default headers values -func NewPatchConfigFailure() *PatchConfigFailure { - return &PatchConfigFailure{} -} - -/* -PatchConfigFailure describes a response with status code 500, with default header values. 
- -Recompilation failed -*/ -type PatchConfigFailure struct { - Payload models.Error -} - -// IsSuccess returns true when this patch config failure response has a 2xx status code -func (o *PatchConfigFailure) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this patch config failure response has a 3xx status code -func (o *PatchConfigFailure) IsRedirect() bool { - return false -} - -// IsClientError returns true when this patch config failure response has a 4xx status code -func (o *PatchConfigFailure) IsClientError() bool { - return false -} - -// IsServerError returns true when this patch config failure response has a 5xx status code -func (o *PatchConfigFailure) IsServerError() bool { - return true -} - -// IsCode returns true when this patch config failure response a status code equal to that given -func (o *PatchConfigFailure) IsCode(code int) bool { - return code == 500 -} - -func (o *PatchConfigFailure) Error() string { - return fmt.Sprintf("[PATCH /config][%d] patchConfigFailure %+v", 500, o.Payload) -} - -func (o *PatchConfigFailure) String() string { - return fmt.Sprintf("[PATCH /config][%d] patchConfigFailure %+v", 500, o.Payload) -} - -func (o *PatchConfigFailure) GetPayload() models.Error { - return o.Payload -} - -func (o *PatchConfigFailure) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/delete_endpoint_id_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/delete_endpoint_id_parameters.go deleted file mode 100644 index c2e2ea793..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/delete_endpoint_id_parameters.go +++ /dev/null @@ -1,166 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package endpoint - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" -) - -// NewDeleteEndpointIDParams creates a new DeleteEndpointIDParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewDeleteEndpointIDParams() *DeleteEndpointIDParams { - return &DeleteEndpointIDParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewDeleteEndpointIDParamsWithTimeout creates a new DeleteEndpointIDParams object -// with the ability to set a timeout on a request. -func NewDeleteEndpointIDParamsWithTimeout(timeout time.Duration) *DeleteEndpointIDParams { - return &DeleteEndpointIDParams{ - timeout: timeout, - } -} - -// NewDeleteEndpointIDParamsWithContext creates a new DeleteEndpointIDParams object -// with the ability to set a context for a request. 
-func NewDeleteEndpointIDParamsWithContext(ctx context.Context) *DeleteEndpointIDParams { - return &DeleteEndpointIDParams{ - Context: ctx, - } -} - -// NewDeleteEndpointIDParamsWithHTTPClient creates a new DeleteEndpointIDParams object -// with the ability to set a custom HTTPClient for a request. -func NewDeleteEndpointIDParamsWithHTTPClient(client *http.Client) *DeleteEndpointIDParams { - return &DeleteEndpointIDParams{ - HTTPClient: client, - } -} - -/* -DeleteEndpointIDParams contains all the parameters to send to the API endpoint - - for the delete endpoint ID operation. - - Typically these are written to a http.Request. -*/ -type DeleteEndpointIDParams struct { - - /* ID. - - String describing an endpoint with the format ``[prefix:]id``. If no prefix - is specified, a prefix of ``cilium-local:`` is assumed. Not all endpoints - will be addressable by all endpoint ID prefixes with the exception of the - local Cilium UUID which is assigned to all endpoints. - - Supported endpoint id prefixes: - - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595 - - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343 - - container-id: Container runtime ID, e.g. container-id:22222 - - container-name: Container name, e.g. container-name:foobar - - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar - - docker-endpoint: Docker libnetwork endpoint ID, e.g. docker-endpoint:4444 - - */ - ID string - - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the delete endpoint ID params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *DeleteEndpointIDParams) WithDefaults() *DeleteEndpointIDParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the delete endpoint ID params (not the query body). -// -// All values with no default are reset to their zero value. 
-func (o *DeleteEndpointIDParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the delete endpoint ID params -func (o *DeleteEndpointIDParams) WithTimeout(timeout time.Duration) *DeleteEndpointIDParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the delete endpoint ID params -func (o *DeleteEndpointIDParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the delete endpoint ID params -func (o *DeleteEndpointIDParams) WithContext(ctx context.Context) *DeleteEndpointIDParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the delete endpoint ID params -func (o *DeleteEndpointIDParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the delete endpoint ID params -func (o *DeleteEndpointIDParams) WithHTTPClient(client *http.Client) *DeleteEndpointIDParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the delete endpoint ID params -func (o *DeleteEndpointIDParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WithID adds the id to the delete endpoint ID params -func (o *DeleteEndpointIDParams) WithID(id string) *DeleteEndpointIDParams { - o.SetID(id) - return o -} - -// SetID adds the id to the delete endpoint ID params -func (o *DeleteEndpointIDParams) SetID(id string) { - o.ID = id -} - -// WriteToRequest writes these params to a swagger request -func (o *DeleteEndpointIDParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - - // path param id - if err := r.SetPathParam("id", o.ID); err != nil { - return err - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/delete_endpoint_id_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/delete_endpoint_id_responses.go deleted file mode 100644 index 67b86218c..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/delete_endpoint_id_responses.go +++ /dev/null @@ -1,339 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package endpoint - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// DeleteEndpointIDReader is a Reader for the DeleteEndpointID structure. -type DeleteEndpointIDReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. 
-func (o *DeleteEndpointIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewDeleteEndpointIDOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 206: - result := NewDeleteEndpointIDErrors() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 400: - result := NewDeleteEndpointIDInvalid() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 404: - result := NewDeleteEndpointIDNotFound() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 429: - result := NewDeleteEndpointIDTooManyRequests() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewDeleteEndpointIDOK creates a DeleteEndpointIDOK with default headers values -func NewDeleteEndpointIDOK() *DeleteEndpointIDOK { - return &DeleteEndpointIDOK{} -} - -/* -DeleteEndpointIDOK describes a response with status code 200, with default header values. - -Success -*/ -type DeleteEndpointIDOK struct { -} - -// IsSuccess returns true when this delete endpoint Id o k response has a 2xx status code -func (o *DeleteEndpointIDOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this delete endpoint Id o k response has a 3xx status code -func (o *DeleteEndpointIDOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this delete endpoint Id o k response has a 4xx status code -func (o *DeleteEndpointIDOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this delete endpoint Id o k response has a 5xx status code -func (o *DeleteEndpointIDOK) IsServerError() bool { - return false -} - -// IsCode returns true when this delete endpoint Id o k response a status code equal to that given -func (o *DeleteEndpointIDOK) IsCode(code int) bool { - return code == 200 -} - -func (o *DeleteEndpointIDOK) Error() string { - return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdOK ", 200) -} - -func (o *DeleteEndpointIDOK) String() string { - return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdOK ", 200) -} - -func (o *DeleteEndpointIDOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} - -// NewDeleteEndpointIDErrors creates a DeleteEndpointIDErrors with default headers values -func NewDeleteEndpointIDErrors() *DeleteEndpointIDErrors { - return &DeleteEndpointIDErrors{} -} - -/* -DeleteEndpointIDErrors describes a response with status code 206, with default header values. 
- -Deleted with a number of errors encountered -*/ -type DeleteEndpointIDErrors struct { - Payload int64 -} - -// IsSuccess returns true when this delete endpoint Id errors response has a 2xx status code -func (o *DeleteEndpointIDErrors) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this delete endpoint Id errors response has a 3xx status code -func (o *DeleteEndpointIDErrors) IsRedirect() bool { - return false -} - -// IsClientError returns true when this delete endpoint Id errors response has a 4xx status code -func (o *DeleteEndpointIDErrors) IsClientError() bool { - return false -} - -// IsServerError returns true when this delete endpoint Id errors response has a 5xx status code -func (o *DeleteEndpointIDErrors) IsServerError() bool { - return false -} - -// IsCode returns true when this delete endpoint Id errors response a status code equal to that given -func (o *DeleteEndpointIDErrors) IsCode(code int) bool { - return code == 206 -} - -func (o *DeleteEndpointIDErrors) Error() string { - return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdErrors %+v", 206, o.Payload) -} - -func (o *DeleteEndpointIDErrors) String() string { - return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdErrors %+v", 206, o.Payload) -} - -func (o *DeleteEndpointIDErrors) GetPayload() int64 { - return o.Payload -} - -func (o *DeleteEndpointIDErrors) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewDeleteEndpointIDInvalid creates a DeleteEndpointIDInvalid with default headers values -func NewDeleteEndpointIDInvalid() *DeleteEndpointIDInvalid { - return &DeleteEndpointIDInvalid{} -} - -/* - DeleteEndpointIDInvalid describes a response with status code 400, with default header values. - - Invalid endpoint ID format for specified type. 
Details in error - -message -*/ -type DeleteEndpointIDInvalid struct { - Payload models.Error -} - -// IsSuccess returns true when this delete endpoint Id invalid response has a 2xx status code -func (o *DeleteEndpointIDInvalid) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this delete endpoint Id invalid response has a 3xx status code -func (o *DeleteEndpointIDInvalid) IsRedirect() bool { - return false -} - -// IsClientError returns true when this delete endpoint Id invalid response has a 4xx status code -func (o *DeleteEndpointIDInvalid) IsClientError() bool { - return true -} - -// IsServerError returns true when this delete endpoint Id invalid response has a 5xx status code -func (o *DeleteEndpointIDInvalid) IsServerError() bool { - return false -} - -// IsCode returns true when this delete endpoint Id invalid response a status code equal to that given -func (o *DeleteEndpointIDInvalid) IsCode(code int) bool { - return code == 400 -} - -func (o *DeleteEndpointIDInvalid) Error() string { - return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdInvalid %+v", 400, o.Payload) -} - -func (o *DeleteEndpointIDInvalid) String() string { - return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdInvalid %+v", 400, o.Payload) -} - -func (o *DeleteEndpointIDInvalid) GetPayload() models.Error { - return o.Payload -} - -func (o *DeleteEndpointIDInvalid) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewDeleteEndpointIDNotFound creates a DeleteEndpointIDNotFound with default headers values -func NewDeleteEndpointIDNotFound() *DeleteEndpointIDNotFound { - return &DeleteEndpointIDNotFound{} -} - -/* -DeleteEndpointIDNotFound describes a response with status code 404, with default header values. 
- -Endpoint not found -*/ -type DeleteEndpointIDNotFound struct { -} - -// IsSuccess returns true when this delete endpoint Id not found response has a 2xx status code -func (o *DeleteEndpointIDNotFound) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this delete endpoint Id not found response has a 3xx status code -func (o *DeleteEndpointIDNotFound) IsRedirect() bool { - return false -} - -// IsClientError returns true when this delete endpoint Id not found response has a 4xx status code -func (o *DeleteEndpointIDNotFound) IsClientError() bool { - return true -} - -// IsServerError returns true when this delete endpoint Id not found response has a 5xx status code -func (o *DeleteEndpointIDNotFound) IsServerError() bool { - return false -} - -// IsCode returns true when this delete endpoint Id not found response a status code equal to that given -func (o *DeleteEndpointIDNotFound) IsCode(code int) bool { - return code == 404 -} - -func (o *DeleteEndpointIDNotFound) Error() string { - return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdNotFound ", 404) -} - -func (o *DeleteEndpointIDNotFound) String() string { - return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdNotFound ", 404) -} - -func (o *DeleteEndpointIDNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} - -// NewDeleteEndpointIDTooManyRequests creates a DeleteEndpointIDTooManyRequests with default headers values -func NewDeleteEndpointIDTooManyRequests() *DeleteEndpointIDTooManyRequests { - return &DeleteEndpointIDTooManyRequests{} -} - -/* -DeleteEndpointIDTooManyRequests describes a response with status code 429, with default header values. - -Rate-limiting too many requests in the given time frame -*/ -type DeleteEndpointIDTooManyRequests struct { -} - -// IsSuccess returns true when this delete endpoint Id too many requests response has a 2xx status code -func (o *DeleteEndpointIDTooManyRequests) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this delete endpoint Id too many requests response has a 3xx status code -func (o *DeleteEndpointIDTooManyRequests) IsRedirect() bool { - return false -} - -// IsClientError returns true when this delete endpoint Id too many requests response has a 4xx status code -func (o *DeleteEndpointIDTooManyRequests) IsClientError() bool { - return true -} - -// IsServerError returns true when this delete endpoint Id too many requests response has a 5xx status code -func (o *DeleteEndpointIDTooManyRequests) IsServerError() bool { - return false -} - -// IsCode returns true when this delete endpoint Id too many requests response a status code equal to that given -func (o *DeleteEndpointIDTooManyRequests) IsCode(code int) bool { - return code == 429 -} - -func (o *DeleteEndpointIDTooManyRequests) Error() string { - return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdTooManyRequests ", 429) -} - -func (o *DeleteEndpointIDTooManyRequests) String() string { - return fmt.Sprintf("[DELETE /endpoint/{id}][%d] deleteEndpointIdTooManyRequests ", 429) -} - -func (o *DeleteEndpointIDTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/endpoint_client.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/endpoint_client.go deleted file mode 100644 index 34d350007..000000000 --- 
a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/endpoint_client.go +++ /dev/null @@ -1,512 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package endpoint - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" -) - -// New creates a new endpoint API client. -func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService { - return &Client{transport: transport, formats: formats} -} - -/* -Client for endpoint API -*/ -type Client struct { - transport runtime.ClientTransport - formats strfmt.Registry -} - -// ClientOption is the option for Client methods -type ClientOption func(*runtime.ClientOperation) - -// ClientService is the interface for Client methods -type ClientService interface { - DeleteEndpointID(params *DeleteEndpointIDParams, opts ...ClientOption) (*DeleteEndpointIDOK, *DeleteEndpointIDErrors, error) - - GetEndpoint(params *GetEndpointParams, opts ...ClientOption) (*GetEndpointOK, error) - - GetEndpointID(params *GetEndpointIDParams, opts ...ClientOption) (*GetEndpointIDOK, error) - - GetEndpointIDConfig(params *GetEndpointIDConfigParams, opts ...ClientOption) (*GetEndpointIDConfigOK, error) - - GetEndpointIDHealthz(params *GetEndpointIDHealthzParams, opts ...ClientOption) (*GetEndpointIDHealthzOK, error) - - GetEndpointIDLabels(params *GetEndpointIDLabelsParams, opts ...ClientOption) (*GetEndpointIDLabelsOK, error) - - GetEndpointIDLog(params *GetEndpointIDLogParams, opts ...ClientOption) (*GetEndpointIDLogOK, error) - - PatchEndpointID(params *PatchEndpointIDParams, opts ...ClientOption) (*PatchEndpointIDOK, error) - - PatchEndpointIDConfig(params *PatchEndpointIDConfigParams, opts ...ClientOption) (*PatchEndpointIDConfigOK, error) - - PatchEndpointIDLabels(params *PatchEndpointIDLabelsParams, opts ...ClientOption) (*PatchEndpointIDLabelsOK, error) - - PutEndpointID(params *PutEndpointIDParams, opts ...ClientOption) (*PutEndpointIDCreated, error) - - SetTransport(transport runtime.ClientTransport) -} - -/* - DeleteEndpointID deletes endpoint - - Deletes the endpoint specified by the ID. Deletion is imminent and - -atomic, if the deletion request is valid and the endpoint exists, -deletion will occur even if errors are encountered in the process. If -errors have been encountered, the code 202 will be returned, otherwise -200 on success. - -All resources associated with the endpoint will be freed and the -workload represented by the endpoint will be disconnected.It will no -longer be able to initiate or receive communications of any sort. 
-*/ -func (a *Client) DeleteEndpointID(params *DeleteEndpointIDParams, opts ...ClientOption) (*DeleteEndpointIDOK, *DeleteEndpointIDErrors, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewDeleteEndpointIDParams() - } - op := &runtime.ClientOperation{ - ID: "DeleteEndpointID", - Method: "DELETE", - PathPattern: "/endpoint/{id}", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &DeleteEndpointIDReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, nil, err - } - switch value := result.(type) { - case *DeleteEndpointIDOK: - return value, nil, nil - case *DeleteEndpointIDErrors: - return nil, value, nil - } - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for endpoint: API contract not enforced by server. Client expected to get an error, but got: %T", result) - panic(msg) -} - -/* -GetEndpoint retrieves a list of endpoints that have metadata matching the provided parameters - -Retrieves a list of endpoints that have metadata matching the provided parameters, or all endpoints if no parameters provided. -*/ -func (a *Client) GetEndpoint(params *GetEndpointParams, opts ...ClientOption) (*GetEndpointOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewGetEndpointParams() - } - op := &runtime.ClientOperation{ - ID: "GetEndpoint", - Method: "GET", - PathPattern: "/endpoint", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &GetEndpointReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*GetEndpointOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for GetEndpoint: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) - panic(msg) -} - -/* -GetEndpointID gets endpoint by endpoint ID - -Returns endpoint information -*/ -func (a *Client) GetEndpointID(params *GetEndpointIDParams, opts ...ClientOption) (*GetEndpointIDOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewGetEndpointIDParams() - } - op := &runtime.ClientOperation{ - ID: "GetEndpointID", - Method: "GET", - PathPattern: "/endpoint/{id}", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &GetEndpointIDReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*GetEndpointIDOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for GetEndpointID: API contract not enforced by server. Client expected to get an error, but got: %T", result) - panic(msg) -} - -/* -GetEndpointIDConfig retrieves endpoint configuration - -Retrieves the configuration of the specified endpoint. -*/ -func (a *Client) GetEndpointIDConfig(params *GetEndpointIDConfigParams, opts ...ClientOption) (*GetEndpointIDConfigOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewGetEndpointIDConfigParams() - } - op := &runtime.ClientOperation{ - ID: "GetEndpointIDConfig", - Method: "GET", - PathPattern: "/endpoint/{id}/config", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &GetEndpointIDConfigReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*GetEndpointIDConfigOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for GetEndpointIDConfig: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) - panic(msg) -} - -/* -GetEndpointIDHealthz retrieves the status logs associated with this endpoint -*/ -func (a *Client) GetEndpointIDHealthz(params *GetEndpointIDHealthzParams, opts ...ClientOption) (*GetEndpointIDHealthzOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewGetEndpointIDHealthzParams() - } - op := &runtime.ClientOperation{ - ID: "GetEndpointIDHealthz", - Method: "GET", - PathPattern: "/endpoint/{id}/healthz", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &GetEndpointIDHealthzReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*GetEndpointIDHealthzOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for GetEndpointIDHealthz: API contract not enforced by server. Client expected to get an error, but got: %T", result) - panic(msg) -} - -/* -GetEndpointIDLabels retrieves the list of labels associated with an endpoint -*/ -func (a *Client) GetEndpointIDLabels(params *GetEndpointIDLabelsParams, opts ...ClientOption) (*GetEndpointIDLabelsOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewGetEndpointIDLabelsParams() - } - op := &runtime.ClientOperation{ - ID: "GetEndpointIDLabels", - Method: "GET", - PathPattern: "/endpoint/{id}/labels", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &GetEndpointIDLabelsReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*GetEndpointIDLabelsOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for GetEndpointIDLabels: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) - panic(msg) -} - -/* -GetEndpointIDLog retrieves the status logs associated with this endpoint -*/ -func (a *Client) GetEndpointIDLog(params *GetEndpointIDLogParams, opts ...ClientOption) (*GetEndpointIDLogOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewGetEndpointIDLogParams() - } - op := &runtime.ClientOperation{ - ID: "GetEndpointIDLog", - Method: "GET", - PathPattern: "/endpoint/{id}/log", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &GetEndpointIDLogReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*GetEndpointIDLogOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for GetEndpointIDLog: API contract not enforced by server. Client expected to get an error, but got: %T", result) - panic(msg) -} - -/* -PatchEndpointID modifies existing endpoint - -Applies the endpoint change request to an existing endpoint -*/ -func (a *Client) PatchEndpointID(params *PatchEndpointIDParams, opts ...ClientOption) (*PatchEndpointIDOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewPatchEndpointIDParams() - } - op := &runtime.ClientOperation{ - ID: "PatchEndpointID", - Method: "PATCH", - PathPattern: "/endpoint/{id}", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &PatchEndpointIDReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*PatchEndpointIDOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for PatchEndpointID: API contract not enforced by server. Client expected to get an error, but got: %T", result) - panic(msg) -} - -/* - PatchEndpointIDConfig modifies mutable endpoint configuration - - Update the configuration of an existing endpoint and regenerates & - -recompiles the corresponding programs automatically. 
-*/ -func (a *Client) PatchEndpointIDConfig(params *PatchEndpointIDConfigParams, opts ...ClientOption) (*PatchEndpointIDConfigOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewPatchEndpointIDConfigParams() - } - op := &runtime.ClientOperation{ - ID: "PatchEndpointIDConfig", - Method: "PATCH", - PathPattern: "/endpoint/{id}/config", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &PatchEndpointIDConfigReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*PatchEndpointIDConfigOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for PatchEndpointIDConfig: API contract not enforced by server. Client expected to get an error, but got: %T", result) - panic(msg) -} - -/* - PatchEndpointIDLabels sets label configuration of endpoint - - Sets labels associated with an endpoint. These can be user provided or - -derived from the orchestration system. -*/ -func (a *Client) PatchEndpointIDLabels(params *PatchEndpointIDLabelsParams, opts ...ClientOption) (*PatchEndpointIDLabelsOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewPatchEndpointIDLabelsParams() - } - op := &runtime.ClientOperation{ - ID: "PatchEndpointIDLabels", - Method: "PATCH", - PathPattern: "/endpoint/{id}/labels", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &PatchEndpointIDLabelsReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*PatchEndpointIDLabelsOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for PatchEndpointIDLabels: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) - panic(msg) -} - -/* -PutEndpointID creates endpoint - -Creates a new endpoint -*/ -func (a *Client) PutEndpointID(params *PutEndpointIDParams, opts ...ClientOption) (*PutEndpointIDCreated, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewPutEndpointIDParams() - } - op := &runtime.ClientOperation{ - ID: "PutEndpointID", - Method: "PUT", - PathPattern: "/endpoint/{id}", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &PutEndpointIDReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*PutEndpointIDCreated) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for PutEndpointID: API contract not enforced by server. Client expected to get an error, but got: %T", result) - panic(msg) -} - -// SetTransport changes the transport on the client -func (a *Client) SetTransport(transport runtime.ClientTransport) { - a.transport = transport -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_config_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_config_parameters.go deleted file mode 100644 index 027970d69..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_config_parameters.go +++ /dev/null @@ -1,166 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package endpoint - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" -) - -// NewGetEndpointIDConfigParams creates a new GetEndpointIDConfigParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewGetEndpointIDConfigParams() *GetEndpointIDConfigParams { - return &GetEndpointIDConfigParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewGetEndpointIDConfigParamsWithTimeout creates a new GetEndpointIDConfigParams object -// with the ability to set a timeout on a request. -func NewGetEndpointIDConfigParamsWithTimeout(timeout time.Duration) *GetEndpointIDConfigParams { - return &GetEndpointIDConfigParams{ - timeout: timeout, - } -} - -// NewGetEndpointIDConfigParamsWithContext creates a new GetEndpointIDConfigParams object -// with the ability to set a context for a request. -func NewGetEndpointIDConfigParamsWithContext(ctx context.Context) *GetEndpointIDConfigParams { - return &GetEndpointIDConfigParams{ - Context: ctx, - } -} - -// NewGetEndpointIDConfigParamsWithHTTPClient creates a new GetEndpointIDConfigParams object -// with the ability to set a custom HTTPClient for a request. 
-func NewGetEndpointIDConfigParamsWithHTTPClient(client *http.Client) *GetEndpointIDConfigParams { - return &GetEndpointIDConfigParams{ - HTTPClient: client, - } -} - -/* -GetEndpointIDConfigParams contains all the parameters to send to the API endpoint - - for the get endpoint ID config operation. - - Typically these are written to a http.Request. -*/ -type GetEndpointIDConfigParams struct { - - /* ID. - - String describing an endpoint with the format ``[prefix:]id``. If no prefix - is specified, a prefix of ``cilium-local:`` is assumed. Not all endpoints - will be addressable by all endpoint ID prefixes with the exception of the - local Cilium UUID which is assigned to all endpoints. - - Supported endpoint id prefixes: - - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595 - - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343 - - container-id: Container runtime ID, e.g. container-id:22222 - - container-name: Container name, e.g. container-name:foobar - - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar - - docker-endpoint: Docker libnetwork endpoint ID, e.g. docker-endpoint:4444 - - */ - ID string - - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the get endpoint ID config params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetEndpointIDConfigParams) WithDefaults() *GetEndpointIDConfigParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the get endpoint ID config params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetEndpointIDConfigParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the get endpoint ID config params -func (o *GetEndpointIDConfigParams) WithTimeout(timeout time.Duration) *GetEndpointIDConfigParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the get endpoint ID config params -func (o *GetEndpointIDConfigParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the get endpoint ID config params -func (o *GetEndpointIDConfigParams) WithContext(ctx context.Context) *GetEndpointIDConfigParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the get endpoint ID config params -func (o *GetEndpointIDConfigParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the get endpoint ID config params -func (o *GetEndpointIDConfigParams) WithHTTPClient(client *http.Client) *GetEndpointIDConfigParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the get endpoint ID config params -func (o *GetEndpointIDConfigParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WithID adds the id to the get endpoint ID config params -func (o *GetEndpointIDConfigParams) WithID(id string) *GetEndpointIDConfigParams { - o.SetID(id) - return o -} - -// SetID adds the id to the get endpoint ID config params -func (o *GetEndpointIDConfigParams) SetID(id string) { - o.ID = id -} - -// WriteToRequest writes these params to a swagger request -func (o *GetEndpointIDConfigParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - - // 
path param id - if err := r.SetPathParam("id", o.ID); err != nil { - return err - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_config_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_config_responses.go deleted file mode 100644 index e16cdfbf5..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_config_responses.go +++ /dev/null @@ -1,215 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package endpoint - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// GetEndpointIDConfigReader is a Reader for the GetEndpointIDConfig structure. -type GetEndpointIDConfigReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. -func (o *GetEndpointIDConfigReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewGetEndpointIDConfigOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 404: - result := NewGetEndpointIDConfigNotFound() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 429: - result := NewGetEndpointIDConfigTooManyRequests() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewGetEndpointIDConfigOK creates a GetEndpointIDConfigOK with default headers values -func NewGetEndpointIDConfigOK() *GetEndpointIDConfigOK { - return &GetEndpointIDConfigOK{} -} - -/* -GetEndpointIDConfigOK describes a response with status code 200, with default header values. 
- -Success -*/ -type GetEndpointIDConfigOK struct { - Payload *models.EndpointConfigurationStatus -} - -// IsSuccess returns true when this get endpoint Id config o k response has a 2xx status code -func (o *GetEndpointIDConfigOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this get endpoint Id config o k response has a 3xx status code -func (o *GetEndpointIDConfigOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get endpoint Id config o k response has a 4xx status code -func (o *GetEndpointIDConfigOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this get endpoint Id config o k response has a 5xx status code -func (o *GetEndpointIDConfigOK) IsServerError() bool { - return false -} - -// IsCode returns true when this get endpoint Id config o k response a status code equal to that given -func (o *GetEndpointIDConfigOK) IsCode(code int) bool { - return code == 200 -} - -func (o *GetEndpointIDConfigOK) Error() string { - return fmt.Sprintf("[GET /endpoint/{id}/config][%d] getEndpointIdConfigOK %+v", 200, o.Payload) -} - -func (o *GetEndpointIDConfigOK) String() string { - return fmt.Sprintf("[GET /endpoint/{id}/config][%d] getEndpointIdConfigOK %+v", 200, o.Payload) -} - -func (o *GetEndpointIDConfigOK) GetPayload() *models.EndpointConfigurationStatus { - return o.Payload -} - -func (o *GetEndpointIDConfigOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - o.Payload = new(models.EndpointConfigurationStatus) - - // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewGetEndpointIDConfigNotFound creates a GetEndpointIDConfigNotFound with default headers values -func NewGetEndpointIDConfigNotFound() *GetEndpointIDConfigNotFound { - return &GetEndpointIDConfigNotFound{} -} - -/* -GetEndpointIDConfigNotFound describes a response with status code 404, with default header values. 
- -Endpoint not found -*/ -type GetEndpointIDConfigNotFound struct { -} - -// IsSuccess returns true when this get endpoint Id config not found response has a 2xx status code -func (o *GetEndpointIDConfigNotFound) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this get endpoint Id config not found response has a 3xx status code -func (o *GetEndpointIDConfigNotFound) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get endpoint Id config not found response has a 4xx status code -func (o *GetEndpointIDConfigNotFound) IsClientError() bool { - return true -} - -// IsServerError returns true when this get endpoint Id config not found response has a 5xx status code -func (o *GetEndpointIDConfigNotFound) IsServerError() bool { - return false -} - -// IsCode returns true when this get endpoint Id config not found response a status code equal to that given -func (o *GetEndpointIDConfigNotFound) IsCode(code int) bool { - return code == 404 -} - -func (o *GetEndpointIDConfigNotFound) Error() string { - return fmt.Sprintf("[GET /endpoint/{id}/config][%d] getEndpointIdConfigNotFound ", 404) -} - -func (o *GetEndpointIDConfigNotFound) String() string { - return fmt.Sprintf("[GET /endpoint/{id}/config][%d] getEndpointIdConfigNotFound ", 404) -} - -func (o *GetEndpointIDConfigNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} - -// NewGetEndpointIDConfigTooManyRequests creates a GetEndpointIDConfigTooManyRequests with default headers values -func NewGetEndpointIDConfigTooManyRequests() *GetEndpointIDConfigTooManyRequests { - return &GetEndpointIDConfigTooManyRequests{} -} - -/* -GetEndpointIDConfigTooManyRequests describes a response with status code 429, with default header values. 
- -Rate-limiting too many requests in the given time frame -*/ -type GetEndpointIDConfigTooManyRequests struct { -} - -// IsSuccess returns true when this get endpoint Id config too many requests response has a 2xx status code -func (o *GetEndpointIDConfigTooManyRequests) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this get endpoint Id config too many requests response has a 3xx status code -func (o *GetEndpointIDConfigTooManyRequests) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get endpoint Id config too many requests response has a 4xx status code -func (o *GetEndpointIDConfigTooManyRequests) IsClientError() bool { - return true -} - -// IsServerError returns true when this get endpoint Id config too many requests response has a 5xx status code -func (o *GetEndpointIDConfigTooManyRequests) IsServerError() bool { - return false -} - -// IsCode returns true when this get endpoint Id config too many requests response a status code equal to that given -func (o *GetEndpointIDConfigTooManyRequests) IsCode(code int) bool { - return code == 429 -} - -func (o *GetEndpointIDConfigTooManyRequests) Error() string { - return fmt.Sprintf("[GET /endpoint/{id}/config][%d] getEndpointIdConfigTooManyRequests ", 429) -} - -func (o *GetEndpointIDConfigTooManyRequests) String() string { - return fmt.Sprintf("[GET /endpoint/{id}/config][%d] getEndpointIdConfigTooManyRequests ", 429) -} - -func (o *GetEndpointIDConfigTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_healthz_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_healthz_parameters.go deleted file mode 100644 index 11b97e7d9..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_healthz_parameters.go +++ /dev/null @@ -1,166 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package endpoint - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" -) - -// NewGetEndpointIDHealthzParams creates a new GetEndpointIDHealthzParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewGetEndpointIDHealthzParams() *GetEndpointIDHealthzParams { - return &GetEndpointIDHealthzParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewGetEndpointIDHealthzParamsWithTimeout creates a new GetEndpointIDHealthzParams object -// with the ability to set a timeout on a request. -func NewGetEndpointIDHealthzParamsWithTimeout(timeout time.Duration) *GetEndpointIDHealthzParams { - return &GetEndpointIDHealthzParams{ - timeout: timeout, - } -} - -// NewGetEndpointIDHealthzParamsWithContext creates a new GetEndpointIDHealthzParams object -// with the ability to set a context for a request. 
-func NewGetEndpointIDHealthzParamsWithContext(ctx context.Context) *GetEndpointIDHealthzParams { - return &GetEndpointIDHealthzParams{ - Context: ctx, - } -} - -// NewGetEndpointIDHealthzParamsWithHTTPClient creates a new GetEndpointIDHealthzParams object -// with the ability to set a custom HTTPClient for a request. -func NewGetEndpointIDHealthzParamsWithHTTPClient(client *http.Client) *GetEndpointIDHealthzParams { - return &GetEndpointIDHealthzParams{ - HTTPClient: client, - } -} - -/* -GetEndpointIDHealthzParams contains all the parameters to send to the API endpoint - - for the get endpoint ID healthz operation. - - Typically these are written to a http.Request. -*/ -type GetEndpointIDHealthzParams struct { - - /* ID. - - String describing an endpoint with the format ``[prefix:]id``. If no prefix - is specified, a prefix of ``cilium-local:`` is assumed. Not all endpoints - will be addressable by all endpoint ID prefixes with the exception of the - local Cilium UUID which is assigned to all endpoints. - - Supported endpoint id prefixes: - - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595 - - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343 - - container-id: Container runtime ID, e.g. container-id:22222 - - container-name: Container name, e.g. container-name:foobar - - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar - - docker-endpoint: Docker libnetwork endpoint ID, e.g. docker-endpoint:4444 - - */ - ID string - - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the get endpoint ID healthz params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetEndpointIDHealthzParams) WithDefaults() *GetEndpointIDHealthzParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the get endpoint ID healthz params (not the query body). -// -// All values with no default are reset to their zero value. 
-func (o *GetEndpointIDHealthzParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the get endpoint ID healthz params -func (o *GetEndpointIDHealthzParams) WithTimeout(timeout time.Duration) *GetEndpointIDHealthzParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the get endpoint ID healthz params -func (o *GetEndpointIDHealthzParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the get endpoint ID healthz params -func (o *GetEndpointIDHealthzParams) WithContext(ctx context.Context) *GetEndpointIDHealthzParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the get endpoint ID healthz params -func (o *GetEndpointIDHealthzParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the get endpoint ID healthz params -func (o *GetEndpointIDHealthzParams) WithHTTPClient(client *http.Client) *GetEndpointIDHealthzParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the get endpoint ID healthz params -func (o *GetEndpointIDHealthzParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WithID adds the id to the get endpoint ID healthz params -func (o *GetEndpointIDHealthzParams) WithID(id string) *GetEndpointIDHealthzParams { - o.SetID(id) - return o -} - -// SetID adds the id to the get endpoint ID healthz params -func (o *GetEndpointIDHealthzParams) SetID(id string) { - o.ID = id -} - -// WriteToRequest writes these params to a swagger request -func (o *GetEndpointIDHealthzParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - - // path param id - if err := r.SetPathParam("id", o.ID); err != nil { - return err - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_healthz_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_healthz_responses.go deleted file mode 100644 index a7e976534..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_healthz_responses.go +++ /dev/null @@ -1,272 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package endpoint - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// GetEndpointIDHealthzReader is a Reader for the GetEndpointIDHealthz structure. -type GetEndpointIDHealthzReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. 
-func (o *GetEndpointIDHealthzReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewGetEndpointIDHealthzOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 400: - result := NewGetEndpointIDHealthzInvalid() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 404: - result := NewGetEndpointIDHealthzNotFound() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 429: - result := NewGetEndpointIDHealthzTooManyRequests() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewGetEndpointIDHealthzOK creates a GetEndpointIDHealthzOK with default headers values -func NewGetEndpointIDHealthzOK() *GetEndpointIDHealthzOK { - return &GetEndpointIDHealthzOK{} -} - -/* -GetEndpointIDHealthzOK describes a response with status code 200, with default header values. - -Success -*/ -type GetEndpointIDHealthzOK struct { - Payload *models.EndpointHealth -} - -// IsSuccess returns true when this get endpoint Id healthz o k response has a 2xx status code -func (o *GetEndpointIDHealthzOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this get endpoint Id healthz o k response has a 3xx status code -func (o *GetEndpointIDHealthzOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get endpoint Id healthz o k response has a 4xx status code -func (o *GetEndpointIDHealthzOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this get endpoint Id healthz o k response has a 5xx status code -func (o *GetEndpointIDHealthzOK) IsServerError() bool { - return false -} - -// IsCode returns true when this get endpoint Id healthz o k response a status code equal to that given -func (o *GetEndpointIDHealthzOK) IsCode(code int) bool { - return code == 200 -} - -func (o *GetEndpointIDHealthzOK) Error() string { - return fmt.Sprintf("[GET /endpoint/{id}/healthz][%d] getEndpointIdHealthzOK %+v", 200, o.Payload) -} - -func (o *GetEndpointIDHealthzOK) String() string { - return fmt.Sprintf("[GET /endpoint/{id}/healthz][%d] getEndpointIdHealthzOK %+v", 200, o.Payload) -} - -func (o *GetEndpointIDHealthzOK) GetPayload() *models.EndpointHealth { - return o.Payload -} - -func (o *GetEndpointIDHealthzOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - o.Payload = new(models.EndpointHealth) - - // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewGetEndpointIDHealthzInvalid creates a GetEndpointIDHealthzInvalid with default headers values -func NewGetEndpointIDHealthzInvalid() *GetEndpointIDHealthzInvalid { - return &GetEndpointIDHealthzInvalid{} -} - -/* -GetEndpointIDHealthzInvalid describes a response with status code 400, with default header values. 
- -Invalid identity provided -*/ -type GetEndpointIDHealthzInvalid struct { -} - -// IsSuccess returns true when this get endpoint Id healthz invalid response has a 2xx status code -func (o *GetEndpointIDHealthzInvalid) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this get endpoint Id healthz invalid response has a 3xx status code -func (o *GetEndpointIDHealthzInvalid) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get endpoint Id healthz invalid response has a 4xx status code -func (o *GetEndpointIDHealthzInvalid) IsClientError() bool { - return true -} - -// IsServerError returns true when this get endpoint Id healthz invalid response has a 5xx status code -func (o *GetEndpointIDHealthzInvalid) IsServerError() bool { - return false -} - -// IsCode returns true when this get endpoint Id healthz invalid response a status code equal to that given -func (o *GetEndpointIDHealthzInvalid) IsCode(code int) bool { - return code == 400 -} - -func (o *GetEndpointIDHealthzInvalid) Error() string { - return fmt.Sprintf("[GET /endpoint/{id}/healthz][%d] getEndpointIdHealthzInvalid ", 400) -} - -func (o *GetEndpointIDHealthzInvalid) String() string { - return fmt.Sprintf("[GET /endpoint/{id}/healthz][%d] getEndpointIdHealthzInvalid ", 400) -} - -func (o *GetEndpointIDHealthzInvalid) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} - -// NewGetEndpointIDHealthzNotFound creates a GetEndpointIDHealthzNotFound with default headers values -func NewGetEndpointIDHealthzNotFound() *GetEndpointIDHealthzNotFound { - return &GetEndpointIDHealthzNotFound{} -} - -/* -GetEndpointIDHealthzNotFound describes a response with status code 404, with default header values. 
- -Endpoint not found -*/ -type GetEndpointIDHealthzNotFound struct { -} - -// IsSuccess returns true when this get endpoint Id healthz not found response has a 2xx status code -func (o *GetEndpointIDHealthzNotFound) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this get endpoint Id healthz not found response has a 3xx status code -func (o *GetEndpointIDHealthzNotFound) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get endpoint Id healthz not found response has a 4xx status code -func (o *GetEndpointIDHealthzNotFound) IsClientError() bool { - return true -} - -// IsServerError returns true when this get endpoint Id healthz not found response has a 5xx status code -func (o *GetEndpointIDHealthzNotFound) IsServerError() bool { - return false -} - -// IsCode returns true when this get endpoint Id healthz not found response a status code equal to that given -func (o *GetEndpointIDHealthzNotFound) IsCode(code int) bool { - return code == 404 -} - -func (o *GetEndpointIDHealthzNotFound) Error() string { - return fmt.Sprintf("[GET /endpoint/{id}/healthz][%d] getEndpointIdHealthzNotFound ", 404) -} - -func (o *GetEndpointIDHealthzNotFound) String() string { - return fmt.Sprintf("[GET /endpoint/{id}/healthz][%d] getEndpointIdHealthzNotFound ", 404) -} - -func (o *GetEndpointIDHealthzNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} - -// NewGetEndpointIDHealthzTooManyRequests creates a GetEndpointIDHealthzTooManyRequests with default headers values -func NewGetEndpointIDHealthzTooManyRequests() *GetEndpointIDHealthzTooManyRequests { - return &GetEndpointIDHealthzTooManyRequests{} -} - -/* -GetEndpointIDHealthzTooManyRequests describes a response with status code 429, with default header values. 
- -Rate-limiting too many requests in the given time frame -*/ -type GetEndpointIDHealthzTooManyRequests struct { -} - -// IsSuccess returns true when this get endpoint Id healthz too many requests response has a 2xx status code -func (o *GetEndpointIDHealthzTooManyRequests) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this get endpoint Id healthz too many requests response has a 3xx status code -func (o *GetEndpointIDHealthzTooManyRequests) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get endpoint Id healthz too many requests response has a 4xx status code -func (o *GetEndpointIDHealthzTooManyRequests) IsClientError() bool { - return true -} - -// IsServerError returns true when this get endpoint Id healthz too many requests response has a 5xx status code -func (o *GetEndpointIDHealthzTooManyRequests) IsServerError() bool { - return false -} - -// IsCode returns true when this get endpoint Id healthz too many requests response a status code equal to that given -func (o *GetEndpointIDHealthzTooManyRequests) IsCode(code int) bool { - return code == 429 -} - -func (o *GetEndpointIDHealthzTooManyRequests) Error() string { - return fmt.Sprintf("[GET /endpoint/{id}/healthz][%d] getEndpointIdHealthzTooManyRequests ", 429) -} - -func (o *GetEndpointIDHealthzTooManyRequests) String() string { - return fmt.Sprintf("[GET /endpoint/{id}/healthz][%d] getEndpointIdHealthzTooManyRequests ", 429) -} - -func (o *GetEndpointIDHealthzTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_labels_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_labels_parameters.go deleted file mode 100644 index 9ed2d6d6b..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_labels_parameters.go +++ /dev/null @@ -1,166 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package endpoint - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" -) - -// NewGetEndpointIDLabelsParams creates a new GetEndpointIDLabelsParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewGetEndpointIDLabelsParams() *GetEndpointIDLabelsParams { - return &GetEndpointIDLabelsParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewGetEndpointIDLabelsParamsWithTimeout creates a new GetEndpointIDLabelsParams object -// with the ability to set a timeout on a request. -func NewGetEndpointIDLabelsParamsWithTimeout(timeout time.Duration) *GetEndpointIDLabelsParams { - return &GetEndpointIDLabelsParams{ - timeout: timeout, - } -} - -// NewGetEndpointIDLabelsParamsWithContext creates a new GetEndpointIDLabelsParams object -// with the ability to set a context for a request. 
-func NewGetEndpointIDLabelsParamsWithContext(ctx context.Context) *GetEndpointIDLabelsParams { - return &GetEndpointIDLabelsParams{ - Context: ctx, - } -} - -// NewGetEndpointIDLabelsParamsWithHTTPClient creates a new GetEndpointIDLabelsParams object -// with the ability to set a custom HTTPClient for a request. -func NewGetEndpointIDLabelsParamsWithHTTPClient(client *http.Client) *GetEndpointIDLabelsParams { - return &GetEndpointIDLabelsParams{ - HTTPClient: client, - } -} - -/* -GetEndpointIDLabelsParams contains all the parameters to send to the API endpoint - - for the get endpoint ID labels operation. - - Typically these are written to a http.Request. -*/ -type GetEndpointIDLabelsParams struct { - - /* ID. - - String describing an endpoint with the format ``[prefix:]id``. If no prefix - is specified, a prefix of ``cilium-local:`` is assumed. Not all endpoints - will be addressable by all endpoint ID prefixes with the exception of the - local Cilium UUID which is assigned to all endpoints. - - Supported endpoint id prefixes: - - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595 - - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343 - - container-id: Container runtime ID, e.g. container-id:22222 - - container-name: Container name, e.g. container-name:foobar - - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar - - docker-endpoint: Docker libnetwork endpoint ID, e.g. docker-endpoint:4444 - - */ - ID string - - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the get endpoint ID labels params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetEndpointIDLabelsParams) WithDefaults() *GetEndpointIDLabelsParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the get endpoint ID labels params (not the query body). -// -// All values with no default are reset to their zero value. 
-func (o *GetEndpointIDLabelsParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the get endpoint ID labels params -func (o *GetEndpointIDLabelsParams) WithTimeout(timeout time.Duration) *GetEndpointIDLabelsParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the get endpoint ID labels params -func (o *GetEndpointIDLabelsParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the get endpoint ID labels params -func (o *GetEndpointIDLabelsParams) WithContext(ctx context.Context) *GetEndpointIDLabelsParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the get endpoint ID labels params -func (o *GetEndpointIDLabelsParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the get endpoint ID labels params -func (o *GetEndpointIDLabelsParams) WithHTTPClient(client *http.Client) *GetEndpointIDLabelsParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the get endpoint ID labels params -func (o *GetEndpointIDLabelsParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WithID adds the id to the get endpoint ID labels params -func (o *GetEndpointIDLabelsParams) WithID(id string) *GetEndpointIDLabelsParams { - o.SetID(id) - return o -} - -// SetID adds the id to the get endpoint ID labels params -func (o *GetEndpointIDLabelsParams) SetID(id string) { - o.ID = id -} - -// WriteToRequest writes these params to a swagger request -func (o *GetEndpointIDLabelsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - - // path param id - if err := r.SetPathParam("id", o.ID); err != nil { - return err - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_labels_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_labels_responses.go deleted file mode 100644 index 3f4e5138d..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_labels_responses.go +++ /dev/null @@ -1,215 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package endpoint - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// GetEndpointIDLabelsReader is a Reader for the GetEndpointIDLabels structure. -type GetEndpointIDLabelsReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. 
-func (o *GetEndpointIDLabelsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewGetEndpointIDLabelsOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 404: - result := NewGetEndpointIDLabelsNotFound() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 429: - result := NewGetEndpointIDLabelsTooManyRequests() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewGetEndpointIDLabelsOK creates a GetEndpointIDLabelsOK with default headers values -func NewGetEndpointIDLabelsOK() *GetEndpointIDLabelsOK { - return &GetEndpointIDLabelsOK{} -} - -/* -GetEndpointIDLabelsOK describes a response with status code 200, with default header values. - -Success -*/ -type GetEndpointIDLabelsOK struct { - Payload *models.LabelConfiguration -} - -// IsSuccess returns true when this get endpoint Id labels o k response has a 2xx status code -func (o *GetEndpointIDLabelsOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this get endpoint Id labels o k response has a 3xx status code -func (o *GetEndpointIDLabelsOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get endpoint Id labels o k response has a 4xx status code -func (o *GetEndpointIDLabelsOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this get endpoint Id labels o k response has a 5xx status code -func (o *GetEndpointIDLabelsOK) IsServerError() bool { - return false -} - -// IsCode returns true when this get endpoint Id labels o k response a status code equal to that given -func (o *GetEndpointIDLabelsOK) IsCode(code int) bool { - return code == 200 -} - -func (o *GetEndpointIDLabelsOK) Error() string { - return fmt.Sprintf("[GET /endpoint/{id}/labels][%d] getEndpointIdLabelsOK %+v", 200, o.Payload) -} - -func (o *GetEndpointIDLabelsOK) String() string { - return fmt.Sprintf("[GET /endpoint/{id}/labels][%d] getEndpointIdLabelsOK %+v", 200, o.Payload) -} - -func (o *GetEndpointIDLabelsOK) GetPayload() *models.LabelConfiguration { - return o.Payload -} - -func (o *GetEndpointIDLabelsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - o.Payload = new(models.LabelConfiguration) - - // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewGetEndpointIDLabelsNotFound creates a GetEndpointIDLabelsNotFound with default headers values -func NewGetEndpointIDLabelsNotFound() *GetEndpointIDLabelsNotFound { - return &GetEndpointIDLabelsNotFound{} -} - -/* -GetEndpointIDLabelsNotFound describes a response with status code 404, with default header values. 
- -Endpoint not found -*/ -type GetEndpointIDLabelsNotFound struct { -} - -// IsSuccess returns true when this get endpoint Id labels not found response has a 2xx status code -func (o *GetEndpointIDLabelsNotFound) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this get endpoint Id labels not found response has a 3xx status code -func (o *GetEndpointIDLabelsNotFound) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get endpoint Id labels not found response has a 4xx status code -func (o *GetEndpointIDLabelsNotFound) IsClientError() bool { - return true -} - -// IsServerError returns true when this get endpoint Id labels not found response has a 5xx status code -func (o *GetEndpointIDLabelsNotFound) IsServerError() bool { - return false -} - -// IsCode returns true when this get endpoint Id labels not found response a status code equal to that given -func (o *GetEndpointIDLabelsNotFound) IsCode(code int) bool { - return code == 404 -} - -func (o *GetEndpointIDLabelsNotFound) Error() string { - return fmt.Sprintf("[GET /endpoint/{id}/labels][%d] getEndpointIdLabelsNotFound ", 404) -} - -func (o *GetEndpointIDLabelsNotFound) String() string { - return fmt.Sprintf("[GET /endpoint/{id}/labels][%d] getEndpointIdLabelsNotFound ", 404) -} - -func (o *GetEndpointIDLabelsNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} - -// NewGetEndpointIDLabelsTooManyRequests creates a GetEndpointIDLabelsTooManyRequests with default headers values -func NewGetEndpointIDLabelsTooManyRequests() *GetEndpointIDLabelsTooManyRequests { - return &GetEndpointIDLabelsTooManyRequests{} -} - -/* -GetEndpointIDLabelsTooManyRequests describes a response with status code 429, with default header values. 
- -Rate-limiting too many requests in the given time frame -*/ -type GetEndpointIDLabelsTooManyRequests struct { -} - -// IsSuccess returns true when this get endpoint Id labels too many requests response has a 2xx status code -func (o *GetEndpointIDLabelsTooManyRequests) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this get endpoint Id labels too many requests response has a 3xx status code -func (o *GetEndpointIDLabelsTooManyRequests) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get endpoint Id labels too many requests response has a 4xx status code -func (o *GetEndpointIDLabelsTooManyRequests) IsClientError() bool { - return true -} - -// IsServerError returns true when this get endpoint Id labels too many requests response has a 5xx status code -func (o *GetEndpointIDLabelsTooManyRequests) IsServerError() bool { - return false -} - -// IsCode returns true when this get endpoint Id labels too many requests response a status code equal to that given -func (o *GetEndpointIDLabelsTooManyRequests) IsCode(code int) bool { - return code == 429 -} - -func (o *GetEndpointIDLabelsTooManyRequests) Error() string { - return fmt.Sprintf("[GET /endpoint/{id}/labels][%d] getEndpointIdLabelsTooManyRequests ", 429) -} - -func (o *GetEndpointIDLabelsTooManyRequests) String() string { - return fmt.Sprintf("[GET /endpoint/{id}/labels][%d] getEndpointIdLabelsTooManyRequests ", 429) -} - -func (o *GetEndpointIDLabelsTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_log_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_log_parameters.go deleted file mode 100644 index 32bec87cc..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_log_parameters.go +++ /dev/null @@ -1,166 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package endpoint - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" -) - -// NewGetEndpointIDLogParams creates a new GetEndpointIDLogParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewGetEndpointIDLogParams() *GetEndpointIDLogParams { - return &GetEndpointIDLogParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewGetEndpointIDLogParamsWithTimeout creates a new GetEndpointIDLogParams object -// with the ability to set a timeout on a request. -func NewGetEndpointIDLogParamsWithTimeout(timeout time.Duration) *GetEndpointIDLogParams { - return &GetEndpointIDLogParams{ - timeout: timeout, - } -} - -// NewGetEndpointIDLogParamsWithContext creates a new GetEndpointIDLogParams object -// with the ability to set a context for a request. 
-func NewGetEndpointIDLogParamsWithContext(ctx context.Context) *GetEndpointIDLogParams { - return &GetEndpointIDLogParams{ - Context: ctx, - } -} - -// NewGetEndpointIDLogParamsWithHTTPClient creates a new GetEndpointIDLogParams object -// with the ability to set a custom HTTPClient for a request. -func NewGetEndpointIDLogParamsWithHTTPClient(client *http.Client) *GetEndpointIDLogParams { - return &GetEndpointIDLogParams{ - HTTPClient: client, - } -} - -/* -GetEndpointIDLogParams contains all the parameters to send to the API endpoint - - for the get endpoint ID log operation. - - Typically these are written to a http.Request. -*/ -type GetEndpointIDLogParams struct { - - /* ID. - - String describing an endpoint with the format ``[prefix:]id``. If no prefix - is specified, a prefix of ``cilium-local:`` is assumed. Not all endpoints - will be addressable by all endpoint ID prefixes with the exception of the - local Cilium UUID which is assigned to all endpoints. - - Supported endpoint id prefixes: - - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595 - - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343 - - container-id: Container runtime ID, e.g. container-id:22222 - - container-name: Container name, e.g. container-name:foobar - - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar - - docker-endpoint: Docker libnetwork endpoint ID, e.g. docker-endpoint:4444 - - */ - ID string - - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the get endpoint ID log params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetEndpointIDLogParams) WithDefaults() *GetEndpointIDLogParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the get endpoint ID log params (not the query body). -// -// All values with no default are reset to their zero value. 
-func (o *GetEndpointIDLogParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the get endpoint ID log params -func (o *GetEndpointIDLogParams) WithTimeout(timeout time.Duration) *GetEndpointIDLogParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the get endpoint ID log params -func (o *GetEndpointIDLogParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the get endpoint ID log params -func (o *GetEndpointIDLogParams) WithContext(ctx context.Context) *GetEndpointIDLogParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the get endpoint ID log params -func (o *GetEndpointIDLogParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the get endpoint ID log params -func (o *GetEndpointIDLogParams) WithHTTPClient(client *http.Client) *GetEndpointIDLogParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the get endpoint ID log params -func (o *GetEndpointIDLogParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WithID adds the id to the get endpoint ID log params -func (o *GetEndpointIDLogParams) WithID(id string) *GetEndpointIDLogParams { - o.SetID(id) - return o -} - -// SetID adds the id to the get endpoint ID log params -func (o *GetEndpointIDLogParams) SetID(id string) { - o.ID = id -} - -// WriteToRequest writes these params to a swagger request -func (o *GetEndpointIDLogParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - - // path param id - if err := r.SetPathParam("id", o.ID); err != nil { - return err - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_log_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_log_responses.go deleted file mode 100644 index db7227d1d..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_log_responses.go +++ /dev/null @@ -1,270 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package endpoint - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// GetEndpointIDLogReader is a Reader for the GetEndpointIDLog structure. -type GetEndpointIDLogReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. 
-func (o *GetEndpointIDLogReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewGetEndpointIDLogOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 400: - result := NewGetEndpointIDLogInvalid() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 404: - result := NewGetEndpointIDLogNotFound() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 429: - result := NewGetEndpointIDLogTooManyRequests() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewGetEndpointIDLogOK creates a GetEndpointIDLogOK with default headers values -func NewGetEndpointIDLogOK() *GetEndpointIDLogOK { - return &GetEndpointIDLogOK{} -} - -/* -GetEndpointIDLogOK describes a response with status code 200, with default header values. - -Success -*/ -type GetEndpointIDLogOK struct { - Payload models.EndpointStatusLog -} - -// IsSuccess returns true when this get endpoint Id log o k response has a 2xx status code -func (o *GetEndpointIDLogOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this get endpoint Id log o k response has a 3xx status code -func (o *GetEndpointIDLogOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get endpoint Id log o k response has a 4xx status code -func (o *GetEndpointIDLogOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this get endpoint Id log o k response has a 5xx status code -func (o *GetEndpointIDLogOK) IsServerError() bool { - return false -} - -// IsCode returns true when this get endpoint Id log o k response a status code equal to that given -func (o *GetEndpointIDLogOK) IsCode(code int) bool { - return code == 200 -} - -func (o *GetEndpointIDLogOK) Error() string { - return fmt.Sprintf("[GET /endpoint/{id}/log][%d] getEndpointIdLogOK %+v", 200, o.Payload) -} - -func (o *GetEndpointIDLogOK) String() string { - return fmt.Sprintf("[GET /endpoint/{id}/log][%d] getEndpointIdLogOK %+v", 200, o.Payload) -} - -func (o *GetEndpointIDLogOK) GetPayload() models.EndpointStatusLog { - return o.Payload -} - -func (o *GetEndpointIDLogOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewGetEndpointIDLogInvalid creates a GetEndpointIDLogInvalid with default headers values -func NewGetEndpointIDLogInvalid() *GetEndpointIDLogInvalid { - return &GetEndpointIDLogInvalid{} -} - -/* -GetEndpointIDLogInvalid describes a response with status code 400, with default header values. 
- -Invalid identity provided -*/ -type GetEndpointIDLogInvalid struct { -} - -// IsSuccess returns true when this get endpoint Id log invalid response has a 2xx status code -func (o *GetEndpointIDLogInvalid) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this get endpoint Id log invalid response has a 3xx status code -func (o *GetEndpointIDLogInvalid) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get endpoint Id log invalid response has a 4xx status code -func (o *GetEndpointIDLogInvalid) IsClientError() bool { - return true -} - -// IsServerError returns true when this get endpoint Id log invalid response has a 5xx status code -func (o *GetEndpointIDLogInvalid) IsServerError() bool { - return false -} - -// IsCode returns true when this get endpoint Id log invalid response a status code equal to that given -func (o *GetEndpointIDLogInvalid) IsCode(code int) bool { - return code == 400 -} - -func (o *GetEndpointIDLogInvalid) Error() string { - return fmt.Sprintf("[GET /endpoint/{id}/log][%d] getEndpointIdLogInvalid ", 400) -} - -func (o *GetEndpointIDLogInvalid) String() string { - return fmt.Sprintf("[GET /endpoint/{id}/log][%d] getEndpointIdLogInvalid ", 400) -} - -func (o *GetEndpointIDLogInvalid) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} - -// NewGetEndpointIDLogNotFound creates a GetEndpointIDLogNotFound with default headers values -func NewGetEndpointIDLogNotFound() *GetEndpointIDLogNotFound { - return &GetEndpointIDLogNotFound{} -} - -/* -GetEndpointIDLogNotFound describes a response with status code 404, with default header values. - -Endpoint not found -*/ -type GetEndpointIDLogNotFound struct { -} - -// IsSuccess returns true when this get endpoint Id log not found response has a 2xx status code -func (o *GetEndpointIDLogNotFound) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this get endpoint Id log not found response has a 3xx status code -func (o *GetEndpointIDLogNotFound) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get endpoint Id log not found response has a 4xx status code -func (o *GetEndpointIDLogNotFound) IsClientError() bool { - return true -} - -// IsServerError returns true when this get endpoint Id log not found response has a 5xx status code -func (o *GetEndpointIDLogNotFound) IsServerError() bool { - return false -} - -// IsCode returns true when this get endpoint Id log not found response a status code equal to that given -func (o *GetEndpointIDLogNotFound) IsCode(code int) bool { - return code == 404 -} - -func (o *GetEndpointIDLogNotFound) Error() string { - return fmt.Sprintf("[GET /endpoint/{id}/log][%d] getEndpointIdLogNotFound ", 404) -} - -func (o *GetEndpointIDLogNotFound) String() string { - return fmt.Sprintf("[GET /endpoint/{id}/log][%d] getEndpointIdLogNotFound ", 404) -} - -func (o *GetEndpointIDLogNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} - -// NewGetEndpointIDLogTooManyRequests creates a GetEndpointIDLogTooManyRequests with default headers values -func NewGetEndpointIDLogTooManyRequests() *GetEndpointIDLogTooManyRequests { - return &GetEndpointIDLogTooManyRequests{} -} - -/* -GetEndpointIDLogTooManyRequests describes a response with status code 429, with default header values. 
- -Rate-limiting too many requests in the given time frame -*/ -type GetEndpointIDLogTooManyRequests struct { -} - -// IsSuccess returns true when this get endpoint Id log too many requests response has a 2xx status code -func (o *GetEndpointIDLogTooManyRequests) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this get endpoint Id log too many requests response has a 3xx status code -func (o *GetEndpointIDLogTooManyRequests) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get endpoint Id log too many requests response has a 4xx status code -func (o *GetEndpointIDLogTooManyRequests) IsClientError() bool { - return true -} - -// IsServerError returns true when this get endpoint Id log too many requests response has a 5xx status code -func (o *GetEndpointIDLogTooManyRequests) IsServerError() bool { - return false -} - -// IsCode returns true when this get endpoint Id log too many requests response a status code equal to that given -func (o *GetEndpointIDLogTooManyRequests) IsCode(code int) bool { - return code == 429 -} - -func (o *GetEndpointIDLogTooManyRequests) Error() string { - return fmt.Sprintf("[GET /endpoint/{id}/log][%d] getEndpointIdLogTooManyRequests ", 429) -} - -func (o *GetEndpointIDLogTooManyRequests) String() string { - return fmt.Sprintf("[GET /endpoint/{id}/log][%d] getEndpointIdLogTooManyRequests ", 429) -} - -func (o *GetEndpointIDLogTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_parameters.go deleted file mode 100644 index d43637631..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_parameters.go +++ /dev/null @@ -1,166 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package endpoint - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" -) - -// NewGetEndpointIDParams creates a new GetEndpointIDParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewGetEndpointIDParams() *GetEndpointIDParams { - return &GetEndpointIDParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewGetEndpointIDParamsWithTimeout creates a new GetEndpointIDParams object -// with the ability to set a timeout on a request. -func NewGetEndpointIDParamsWithTimeout(timeout time.Duration) *GetEndpointIDParams { - return &GetEndpointIDParams{ - timeout: timeout, - } -} - -// NewGetEndpointIDParamsWithContext creates a new GetEndpointIDParams object -// with the ability to set a context for a request. -func NewGetEndpointIDParamsWithContext(ctx context.Context) *GetEndpointIDParams { - return &GetEndpointIDParams{ - Context: ctx, - } -} - -// NewGetEndpointIDParamsWithHTTPClient creates a new GetEndpointIDParams object -// with the ability to set a custom HTTPClient for a request. 
-func NewGetEndpointIDParamsWithHTTPClient(client *http.Client) *GetEndpointIDParams { - return &GetEndpointIDParams{ - HTTPClient: client, - } -} - -/* -GetEndpointIDParams contains all the parameters to send to the API endpoint - - for the get endpoint ID operation. - - Typically these are written to a http.Request. -*/ -type GetEndpointIDParams struct { - - /* ID. - - String describing an endpoint with the format ``[prefix:]id``. If no prefix - is specified, a prefix of ``cilium-local:`` is assumed. Not all endpoints - will be addressable by all endpoint ID prefixes with the exception of the - local Cilium UUID which is assigned to all endpoints. - - Supported endpoint id prefixes: - - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595 - - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343 - - container-id: Container runtime ID, e.g. container-id:22222 - - container-name: Container name, e.g. container-name:foobar - - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar - - docker-endpoint: Docker libnetwork endpoint ID, e.g. docker-endpoint:4444 - - */ - ID string - - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the get endpoint ID params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetEndpointIDParams) WithDefaults() *GetEndpointIDParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the get endpoint ID params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetEndpointIDParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the get endpoint ID params -func (o *GetEndpointIDParams) WithTimeout(timeout time.Duration) *GetEndpointIDParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the get endpoint ID params -func (o *GetEndpointIDParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the get endpoint ID params -func (o *GetEndpointIDParams) WithContext(ctx context.Context) *GetEndpointIDParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the get endpoint ID params -func (o *GetEndpointIDParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the get endpoint ID params -func (o *GetEndpointIDParams) WithHTTPClient(client *http.Client) *GetEndpointIDParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the get endpoint ID params -func (o *GetEndpointIDParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WithID adds the id to the get endpoint ID params -func (o *GetEndpointIDParams) WithID(id string) *GetEndpointIDParams { - o.SetID(id) - return o -} - -// SetID adds the id to the get endpoint ID params -func (o *GetEndpointIDParams) SetID(id string) { - o.ID = id -} - -// WriteToRequest writes these params to a swagger request -func (o *GetEndpointIDParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - - // path param id - if err := r.SetPathParam("id", o.ID); err != nil { - return err - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_responses.go deleted file mode 100644 index 4d07e3aa4..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_id_responses.go +++ /dev/null @@ -1,282 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package endpoint - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// GetEndpointIDReader is a Reader for the GetEndpointID structure. -type GetEndpointIDReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. -func (o *GetEndpointIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewGetEndpointIDOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 400: - result := NewGetEndpointIDInvalid() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 404: - result := NewGetEndpointIDNotFound() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 429: - result := NewGetEndpointIDTooManyRequests() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewGetEndpointIDOK creates a GetEndpointIDOK with default headers values -func NewGetEndpointIDOK() *GetEndpointIDOK { - return &GetEndpointIDOK{} -} - -/* -GetEndpointIDOK describes a response with status code 200, with default header values. 
- -Success -*/ -type GetEndpointIDOK struct { - Payload *models.Endpoint -} - -// IsSuccess returns true when this get endpoint Id o k response has a 2xx status code -func (o *GetEndpointIDOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this get endpoint Id o k response has a 3xx status code -func (o *GetEndpointIDOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get endpoint Id o k response has a 4xx status code -func (o *GetEndpointIDOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this get endpoint Id o k response has a 5xx status code -func (o *GetEndpointIDOK) IsServerError() bool { - return false -} - -// IsCode returns true when this get endpoint Id o k response a status code equal to that given -func (o *GetEndpointIDOK) IsCode(code int) bool { - return code == 200 -} - -func (o *GetEndpointIDOK) Error() string { - return fmt.Sprintf("[GET /endpoint/{id}][%d] getEndpointIdOK %+v", 200, o.Payload) -} - -func (o *GetEndpointIDOK) String() string { - return fmt.Sprintf("[GET /endpoint/{id}][%d] getEndpointIdOK %+v", 200, o.Payload) -} - -func (o *GetEndpointIDOK) GetPayload() *models.Endpoint { - return o.Payload -} - -func (o *GetEndpointIDOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - o.Payload = new(models.Endpoint) - - // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewGetEndpointIDInvalid creates a GetEndpointIDInvalid with default headers values -func NewGetEndpointIDInvalid() *GetEndpointIDInvalid { - return &GetEndpointIDInvalid{} -} - -/* -GetEndpointIDInvalid describes a response with status code 400, with default header values. 
- -Invalid endpoint ID format for specified type -*/ -type GetEndpointIDInvalid struct { - Payload models.Error -} - -// IsSuccess returns true when this get endpoint Id invalid response has a 2xx status code -func (o *GetEndpointIDInvalid) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this get endpoint Id invalid response has a 3xx status code -func (o *GetEndpointIDInvalid) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get endpoint Id invalid response has a 4xx status code -func (o *GetEndpointIDInvalid) IsClientError() bool { - return true -} - -// IsServerError returns true when this get endpoint Id invalid response has a 5xx status code -func (o *GetEndpointIDInvalid) IsServerError() bool { - return false -} - -// IsCode returns true when this get endpoint Id invalid response a status code equal to that given -func (o *GetEndpointIDInvalid) IsCode(code int) bool { - return code == 400 -} - -func (o *GetEndpointIDInvalid) Error() string { - return fmt.Sprintf("[GET /endpoint/{id}][%d] getEndpointIdInvalid %+v", 400, o.Payload) -} - -func (o *GetEndpointIDInvalid) String() string { - return fmt.Sprintf("[GET /endpoint/{id}][%d] getEndpointIdInvalid %+v", 400, o.Payload) -} - -func (o *GetEndpointIDInvalid) GetPayload() models.Error { - return o.Payload -} - -func (o *GetEndpointIDInvalid) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewGetEndpointIDNotFound creates a GetEndpointIDNotFound with default headers values -func NewGetEndpointIDNotFound() *GetEndpointIDNotFound { - return &GetEndpointIDNotFound{} -} - -/* -GetEndpointIDNotFound describes a response with status code 404, with default header values. 
- -Endpoint not found -*/ -type GetEndpointIDNotFound struct { -} - -// IsSuccess returns true when this get endpoint Id not found response has a 2xx status code -func (o *GetEndpointIDNotFound) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this get endpoint Id not found response has a 3xx status code -func (o *GetEndpointIDNotFound) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get endpoint Id not found response has a 4xx status code -func (o *GetEndpointIDNotFound) IsClientError() bool { - return true -} - -// IsServerError returns true when this get endpoint Id not found response has a 5xx status code -func (o *GetEndpointIDNotFound) IsServerError() bool { - return false -} - -// IsCode returns true when this get endpoint Id not found response a status code equal to that given -func (o *GetEndpointIDNotFound) IsCode(code int) bool { - return code == 404 -} - -func (o *GetEndpointIDNotFound) Error() string { - return fmt.Sprintf("[GET /endpoint/{id}][%d] getEndpointIdNotFound ", 404) -} - -func (o *GetEndpointIDNotFound) String() string { - return fmt.Sprintf("[GET /endpoint/{id}][%d] getEndpointIdNotFound ", 404) -} - -func (o *GetEndpointIDNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} - -// NewGetEndpointIDTooManyRequests creates a GetEndpointIDTooManyRequests with default headers values -func NewGetEndpointIDTooManyRequests() *GetEndpointIDTooManyRequests { - return &GetEndpointIDTooManyRequests{} -} - -/* -GetEndpointIDTooManyRequests describes a response with status code 429, with default header values. - -Rate-limiting too many requests in the given time frame -*/ -type GetEndpointIDTooManyRequests struct { -} - -// IsSuccess returns true when this get endpoint Id too many requests response has a 2xx status code -func (o *GetEndpointIDTooManyRequests) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this get endpoint Id too many requests response has a 3xx status code -func (o *GetEndpointIDTooManyRequests) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get endpoint Id too many requests response has a 4xx status code -func (o *GetEndpointIDTooManyRequests) IsClientError() bool { - return true -} - -// IsServerError returns true when this get endpoint Id too many requests response has a 5xx status code -func (o *GetEndpointIDTooManyRequests) IsServerError() bool { - return false -} - -// IsCode returns true when this get endpoint Id too many requests response a status code equal to that given -func (o *GetEndpointIDTooManyRequests) IsCode(code int) bool { - return code == 429 -} - -func (o *GetEndpointIDTooManyRequests) Error() string { - return fmt.Sprintf("[GET /endpoint/{id}][%d] getEndpointIdTooManyRequests ", 429) -} - -func (o *GetEndpointIDTooManyRequests) String() string { - return fmt.Sprintf("[GET /endpoint/{id}][%d] getEndpointIdTooManyRequests ", 429) -} - -func (o *GetEndpointIDTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_parameters.go deleted file mode 100644 index fa20da9d3..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_parameters.go +++ /dev/null @@ -1,157 +0,0 @@ -// 
Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package endpoint - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// NewGetEndpointParams creates a new GetEndpointParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewGetEndpointParams() *GetEndpointParams { - return &GetEndpointParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewGetEndpointParamsWithTimeout creates a new GetEndpointParams object -// with the ability to set a timeout on a request. -func NewGetEndpointParamsWithTimeout(timeout time.Duration) *GetEndpointParams { - return &GetEndpointParams{ - timeout: timeout, - } -} - -// NewGetEndpointParamsWithContext creates a new GetEndpointParams object -// with the ability to set a context for a request. -func NewGetEndpointParamsWithContext(ctx context.Context) *GetEndpointParams { - return &GetEndpointParams{ - Context: ctx, - } -} - -// NewGetEndpointParamsWithHTTPClient creates a new GetEndpointParams object -// with the ability to set a custom HTTPClient for a request. -func NewGetEndpointParamsWithHTTPClient(client *http.Client) *GetEndpointParams { - return &GetEndpointParams{ - HTTPClient: client, - } -} - -/* -GetEndpointParams contains all the parameters to send to the API endpoint - - for the get endpoint operation. - - Typically these are written to a http.Request. -*/ -type GetEndpointParams struct { - - /* Labels. - - List of labels - - */ - Labels models.Labels - - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the get endpoint params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetEndpointParams) WithDefaults() *GetEndpointParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the get endpoint params (not the query body). -// -// All values with no default are reset to their zero value. 
-func (o *GetEndpointParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the get endpoint params -func (o *GetEndpointParams) WithTimeout(timeout time.Duration) *GetEndpointParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the get endpoint params -func (o *GetEndpointParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the get endpoint params -func (o *GetEndpointParams) WithContext(ctx context.Context) *GetEndpointParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the get endpoint params -func (o *GetEndpointParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the get endpoint params -func (o *GetEndpointParams) WithHTTPClient(client *http.Client) *GetEndpointParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the get endpoint params -func (o *GetEndpointParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WithLabels adds the labels to the get endpoint params -func (o *GetEndpointParams) WithLabels(labels models.Labels) *GetEndpointParams { - o.SetLabels(labels) - return o -} - -// SetLabels adds the labels to the get endpoint params -func (o *GetEndpointParams) SetLabels(labels models.Labels) { - o.Labels = labels -} - -// WriteToRequest writes these params to a swagger request -func (o *GetEndpointParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - if o.Labels != nil { - if err := r.SetBodyParam(o.Labels); err != nil { - return err - } - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_responses.go deleted file mode 100644 index 5193537ea..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/get_endpoint_responses.go +++ /dev/null @@ -1,213 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package endpoint - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// GetEndpointReader is a Reader for the GetEndpoint structure. -type GetEndpointReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. 
-func (o *GetEndpointReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewGetEndpointOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 404: - result := NewGetEndpointNotFound() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 429: - result := NewGetEndpointTooManyRequests() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewGetEndpointOK creates a GetEndpointOK with default headers values -func NewGetEndpointOK() *GetEndpointOK { - return &GetEndpointOK{} -} - -/* -GetEndpointOK describes a response with status code 200, with default header values. - -Success -*/ -type GetEndpointOK struct { - Payload []*models.Endpoint -} - -// IsSuccess returns true when this get endpoint o k response has a 2xx status code -func (o *GetEndpointOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this get endpoint o k response has a 3xx status code -func (o *GetEndpointOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get endpoint o k response has a 4xx status code -func (o *GetEndpointOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this get endpoint o k response has a 5xx status code -func (o *GetEndpointOK) IsServerError() bool { - return false -} - -// IsCode returns true when this get endpoint o k response a status code equal to that given -func (o *GetEndpointOK) IsCode(code int) bool { - return code == 200 -} - -func (o *GetEndpointOK) Error() string { - return fmt.Sprintf("[GET /endpoint][%d] getEndpointOK %+v", 200, o.Payload) -} - -func (o *GetEndpointOK) String() string { - return fmt.Sprintf("[GET /endpoint][%d] getEndpointOK %+v", 200, o.Payload) -} - -func (o *GetEndpointOK) GetPayload() []*models.Endpoint { - return o.Payload -} - -func (o *GetEndpointOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewGetEndpointNotFound creates a GetEndpointNotFound with default headers values -func NewGetEndpointNotFound() *GetEndpointNotFound { - return &GetEndpointNotFound{} -} - -/* -GetEndpointNotFound describes a response with status code 404, with default header values. 
- -Endpoints with provided parameters not found -*/ -type GetEndpointNotFound struct { -} - -// IsSuccess returns true when this get endpoint not found response has a 2xx status code -func (o *GetEndpointNotFound) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this get endpoint not found response has a 3xx status code -func (o *GetEndpointNotFound) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get endpoint not found response has a 4xx status code -func (o *GetEndpointNotFound) IsClientError() bool { - return true -} - -// IsServerError returns true when this get endpoint not found response has a 5xx status code -func (o *GetEndpointNotFound) IsServerError() bool { - return false -} - -// IsCode returns true when this get endpoint not found response a status code equal to that given -func (o *GetEndpointNotFound) IsCode(code int) bool { - return code == 404 -} - -func (o *GetEndpointNotFound) Error() string { - return fmt.Sprintf("[GET /endpoint][%d] getEndpointNotFound ", 404) -} - -func (o *GetEndpointNotFound) String() string { - return fmt.Sprintf("[GET /endpoint][%d] getEndpointNotFound ", 404) -} - -func (o *GetEndpointNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} - -// NewGetEndpointTooManyRequests creates a GetEndpointTooManyRequests with default headers values -func NewGetEndpointTooManyRequests() *GetEndpointTooManyRequests { - return &GetEndpointTooManyRequests{} -} - -/* -GetEndpointTooManyRequests describes a response with status code 429, with default header values. - -Rate-limiting too many requests in the given time frame -*/ -type GetEndpointTooManyRequests struct { -} - -// IsSuccess returns true when this get endpoint too many requests response has a 2xx status code -func (o *GetEndpointTooManyRequests) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this get endpoint too many requests response has a 3xx status code -func (o *GetEndpointTooManyRequests) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get endpoint too many requests response has a 4xx status code -func (o *GetEndpointTooManyRequests) IsClientError() bool { - return true -} - -// IsServerError returns true when this get endpoint too many requests response has a 5xx status code -func (o *GetEndpointTooManyRequests) IsServerError() bool { - return false -} - -// IsCode returns true when this get endpoint too many requests response a status code equal to that given -func (o *GetEndpointTooManyRequests) IsCode(code int) bool { - return code == 429 -} - -func (o *GetEndpointTooManyRequests) Error() string { - return fmt.Sprintf("[GET /endpoint][%d] getEndpointTooManyRequests ", 429) -} - -func (o *GetEndpointTooManyRequests) String() string { - return fmt.Sprintf("[GET /endpoint][%d] getEndpointTooManyRequests ", 429) -} - -func (o *GetEndpointTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_config_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_config_parameters.go deleted file mode 100644 index a1b33a28d..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_config_parameters.go +++ /dev/null @@ -1,187 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. 
- -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package endpoint - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// NewPatchEndpointIDConfigParams creates a new PatchEndpointIDConfigParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewPatchEndpointIDConfigParams() *PatchEndpointIDConfigParams { - return &PatchEndpointIDConfigParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewPatchEndpointIDConfigParamsWithTimeout creates a new PatchEndpointIDConfigParams object -// with the ability to set a timeout on a request. -func NewPatchEndpointIDConfigParamsWithTimeout(timeout time.Duration) *PatchEndpointIDConfigParams { - return &PatchEndpointIDConfigParams{ - timeout: timeout, - } -} - -// NewPatchEndpointIDConfigParamsWithContext creates a new PatchEndpointIDConfigParams object -// with the ability to set a context for a request. -func NewPatchEndpointIDConfigParamsWithContext(ctx context.Context) *PatchEndpointIDConfigParams { - return &PatchEndpointIDConfigParams{ - Context: ctx, - } -} - -// NewPatchEndpointIDConfigParamsWithHTTPClient creates a new PatchEndpointIDConfigParams object -// with the ability to set a custom HTTPClient for a request. -func NewPatchEndpointIDConfigParamsWithHTTPClient(client *http.Client) *PatchEndpointIDConfigParams { - return &PatchEndpointIDConfigParams{ - HTTPClient: client, - } -} - -/* -PatchEndpointIDConfigParams contains all the parameters to send to the API endpoint - - for the patch endpoint ID config operation. - - Typically these are written to a http.Request. -*/ -type PatchEndpointIDConfigParams struct { - - // EndpointConfiguration. - EndpointConfiguration *models.EndpointConfigurationSpec - - /* ID. - - String describing an endpoint with the format ``[prefix:]id``. If no prefix - is specified, a prefix of ``cilium-local:`` is assumed. Not all endpoints - will be addressable by all endpoint ID prefixes with the exception of the - local Cilium UUID which is assigned to all endpoints. - - Supported endpoint id prefixes: - - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595 - - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343 - - container-id: Container runtime ID, e.g. container-id:22222 - - container-name: Container name, e.g. container-name:foobar - - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar - - docker-endpoint: Docker libnetwork endpoint ID, e.g. docker-endpoint:4444 - - */ - ID string - - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the patch endpoint ID config params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *PatchEndpointIDConfigParams) WithDefaults() *PatchEndpointIDConfigParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the patch endpoint ID config params (not the query body). 
-// -// All values with no default are reset to their zero value. -func (o *PatchEndpointIDConfigParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the patch endpoint ID config params -func (o *PatchEndpointIDConfigParams) WithTimeout(timeout time.Duration) *PatchEndpointIDConfigParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the patch endpoint ID config params -func (o *PatchEndpointIDConfigParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the patch endpoint ID config params -func (o *PatchEndpointIDConfigParams) WithContext(ctx context.Context) *PatchEndpointIDConfigParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the patch endpoint ID config params -func (o *PatchEndpointIDConfigParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the patch endpoint ID config params -func (o *PatchEndpointIDConfigParams) WithHTTPClient(client *http.Client) *PatchEndpointIDConfigParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the patch endpoint ID config params -func (o *PatchEndpointIDConfigParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WithEndpointConfiguration adds the endpointConfiguration to the patch endpoint ID config params -func (o *PatchEndpointIDConfigParams) WithEndpointConfiguration(endpointConfiguration *models.EndpointConfigurationSpec) *PatchEndpointIDConfigParams { - o.SetEndpointConfiguration(endpointConfiguration) - return o -} - -// SetEndpointConfiguration adds the endpointConfiguration to the patch endpoint ID config params -func (o *PatchEndpointIDConfigParams) SetEndpointConfiguration(endpointConfiguration *models.EndpointConfigurationSpec) { - o.EndpointConfiguration = endpointConfiguration -} - -// WithID adds the id to the patch endpoint ID config params -func (o *PatchEndpointIDConfigParams) WithID(id string) *PatchEndpointIDConfigParams { - o.SetID(id) - return o -} - -// SetID adds the id to the patch endpoint ID config params -func (o *PatchEndpointIDConfigParams) SetID(id string) { - o.ID = id -} - -// WriteToRequest writes these params to a swagger request -func (o *PatchEndpointIDConfigParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - if o.EndpointConfiguration != nil { - if err := r.SetBodyParam(o.EndpointConfiguration); err != nil { - return err - } - } - - // path param id - if err := r.SetPathParam("id", o.ID); err != nil { - return err - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_config_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_config_responses.go deleted file mode 100644 index ee7bdda31..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_config_responses.go +++ /dev/null @@ -1,327 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package endpoint - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// PatchEndpointIDConfigReader is a Reader for the PatchEndpointIDConfig structure. -type PatchEndpointIDConfigReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. -func (o *PatchEndpointIDConfigReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewPatchEndpointIDConfigOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 400: - result := NewPatchEndpointIDConfigInvalid() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 404: - result := NewPatchEndpointIDConfigNotFound() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 429: - result := NewPatchEndpointIDConfigTooManyRequests() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 500: - result := NewPatchEndpointIDConfigFailed() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewPatchEndpointIDConfigOK creates a PatchEndpointIDConfigOK with default headers values -func NewPatchEndpointIDConfigOK() *PatchEndpointIDConfigOK { - return &PatchEndpointIDConfigOK{} -} - -/* -PatchEndpointIDConfigOK describes a response with status code 200, with default header values. 
- -Success -*/ -type PatchEndpointIDConfigOK struct { -} - -// IsSuccess returns true when this patch endpoint Id config o k response has a 2xx status code -func (o *PatchEndpointIDConfigOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this patch endpoint Id config o k response has a 3xx status code -func (o *PatchEndpointIDConfigOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this patch endpoint Id config o k response has a 4xx status code -func (o *PatchEndpointIDConfigOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this patch endpoint Id config o k response has a 5xx status code -func (o *PatchEndpointIDConfigOK) IsServerError() bool { - return false -} - -// IsCode returns true when this patch endpoint Id config o k response a status code equal to that given -func (o *PatchEndpointIDConfigOK) IsCode(code int) bool { - return code == 200 -} - -func (o *PatchEndpointIDConfigOK) Error() string { - return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigOK ", 200) -} - -func (o *PatchEndpointIDConfigOK) String() string { - return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigOK ", 200) -} - -func (o *PatchEndpointIDConfigOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} - -// NewPatchEndpointIDConfigInvalid creates a PatchEndpointIDConfigInvalid with default headers values -func NewPatchEndpointIDConfigInvalid() *PatchEndpointIDConfigInvalid { - return &PatchEndpointIDConfigInvalid{} -} - -/* -PatchEndpointIDConfigInvalid describes a response with status code 400, with default header values. - -Invalid configuration request -*/ -type PatchEndpointIDConfigInvalid struct { -} - -// IsSuccess returns true when this patch endpoint Id config invalid response has a 2xx status code -func (o *PatchEndpointIDConfigInvalid) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this patch endpoint Id config invalid response has a 3xx status code -func (o *PatchEndpointIDConfigInvalid) IsRedirect() bool { - return false -} - -// IsClientError returns true when this patch endpoint Id config invalid response has a 4xx status code -func (o *PatchEndpointIDConfigInvalid) IsClientError() bool { - return true -} - -// IsServerError returns true when this patch endpoint Id config invalid response has a 5xx status code -func (o *PatchEndpointIDConfigInvalid) IsServerError() bool { - return false -} - -// IsCode returns true when this patch endpoint Id config invalid response a status code equal to that given -func (o *PatchEndpointIDConfigInvalid) IsCode(code int) bool { - return code == 400 -} - -func (o *PatchEndpointIDConfigInvalid) Error() string { - return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigInvalid ", 400) -} - -func (o *PatchEndpointIDConfigInvalid) String() string { - return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigInvalid ", 400) -} - -func (o *PatchEndpointIDConfigInvalid) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} - -// NewPatchEndpointIDConfigNotFound creates a PatchEndpointIDConfigNotFound with default headers values -func NewPatchEndpointIDConfigNotFound() *PatchEndpointIDConfigNotFound { - return &PatchEndpointIDConfigNotFound{} -} - -/* -PatchEndpointIDConfigNotFound describes a response with status code 404, with default header 
values. - -Endpoint not found -*/ -type PatchEndpointIDConfigNotFound struct { -} - -// IsSuccess returns true when this patch endpoint Id config not found response has a 2xx status code -func (o *PatchEndpointIDConfigNotFound) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this patch endpoint Id config not found response has a 3xx status code -func (o *PatchEndpointIDConfigNotFound) IsRedirect() bool { - return false -} - -// IsClientError returns true when this patch endpoint Id config not found response has a 4xx status code -func (o *PatchEndpointIDConfigNotFound) IsClientError() bool { - return true -} - -// IsServerError returns true when this patch endpoint Id config not found response has a 5xx status code -func (o *PatchEndpointIDConfigNotFound) IsServerError() bool { - return false -} - -// IsCode returns true when this patch endpoint Id config not found response a status code equal to that given -func (o *PatchEndpointIDConfigNotFound) IsCode(code int) bool { - return code == 404 -} - -func (o *PatchEndpointIDConfigNotFound) Error() string { - return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigNotFound ", 404) -} - -func (o *PatchEndpointIDConfigNotFound) String() string { - return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigNotFound ", 404) -} - -func (o *PatchEndpointIDConfigNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} - -// NewPatchEndpointIDConfigTooManyRequests creates a PatchEndpointIDConfigTooManyRequests with default headers values -func NewPatchEndpointIDConfigTooManyRequests() *PatchEndpointIDConfigTooManyRequests { - return &PatchEndpointIDConfigTooManyRequests{} -} - -/* -PatchEndpointIDConfigTooManyRequests describes a response with status code 429, with default header values. 
- -Rate-limiting too many requests in the given time frame -*/ -type PatchEndpointIDConfigTooManyRequests struct { -} - -// IsSuccess returns true when this patch endpoint Id config too many requests response has a 2xx status code -func (o *PatchEndpointIDConfigTooManyRequests) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this patch endpoint Id config too many requests response has a 3xx status code -func (o *PatchEndpointIDConfigTooManyRequests) IsRedirect() bool { - return false -} - -// IsClientError returns true when this patch endpoint Id config too many requests response has a 4xx status code -func (o *PatchEndpointIDConfigTooManyRequests) IsClientError() bool { - return true -} - -// IsServerError returns true when this patch endpoint Id config too many requests response has a 5xx status code -func (o *PatchEndpointIDConfigTooManyRequests) IsServerError() bool { - return false -} - -// IsCode returns true when this patch endpoint Id config too many requests response a status code equal to that given -func (o *PatchEndpointIDConfigTooManyRequests) IsCode(code int) bool { - return code == 429 -} - -func (o *PatchEndpointIDConfigTooManyRequests) Error() string { - return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigTooManyRequests ", 429) -} - -func (o *PatchEndpointIDConfigTooManyRequests) String() string { - return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigTooManyRequests ", 429) -} - -func (o *PatchEndpointIDConfigTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} - -// NewPatchEndpointIDConfigFailed creates a PatchEndpointIDConfigFailed with default headers values -func NewPatchEndpointIDConfigFailed() *PatchEndpointIDConfigFailed { - return &PatchEndpointIDConfigFailed{} -} - -/* -PatchEndpointIDConfigFailed describes a response with status code 500, with default header values. - -Update failed. Details in message. 
-*/ -type PatchEndpointIDConfigFailed struct { - Payload models.Error -} - -// IsSuccess returns true when this patch endpoint Id config failed response has a 2xx status code -func (o *PatchEndpointIDConfigFailed) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this patch endpoint Id config failed response has a 3xx status code -func (o *PatchEndpointIDConfigFailed) IsRedirect() bool { - return false -} - -// IsClientError returns true when this patch endpoint Id config failed response has a 4xx status code -func (o *PatchEndpointIDConfigFailed) IsClientError() bool { - return false -} - -// IsServerError returns true when this patch endpoint Id config failed response has a 5xx status code -func (o *PatchEndpointIDConfigFailed) IsServerError() bool { - return true -} - -// IsCode returns true when this patch endpoint Id config failed response a status code equal to that given -func (o *PatchEndpointIDConfigFailed) IsCode(code int) bool { - return code == 500 -} - -func (o *PatchEndpointIDConfigFailed) Error() string { - return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigFailed %+v", 500, o.Payload) -} - -func (o *PatchEndpointIDConfigFailed) String() string { - return fmt.Sprintf("[PATCH /endpoint/{id}/config][%d] patchEndpointIdConfigFailed %+v", 500, o.Payload) -} - -func (o *PatchEndpointIDConfigFailed) GetPayload() models.Error { - return o.Payload -} - -func (o *PatchEndpointIDConfigFailed) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_labels_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_labels_parameters.go deleted file mode 100644 index db93b22dd..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_labels_parameters.go +++ /dev/null @@ -1,187 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package endpoint - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// NewPatchEndpointIDLabelsParams creates a new PatchEndpointIDLabelsParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewPatchEndpointIDLabelsParams() *PatchEndpointIDLabelsParams { - return &PatchEndpointIDLabelsParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewPatchEndpointIDLabelsParamsWithTimeout creates a new PatchEndpointIDLabelsParams object -// with the ability to set a timeout on a request. -func NewPatchEndpointIDLabelsParamsWithTimeout(timeout time.Duration) *PatchEndpointIDLabelsParams { - return &PatchEndpointIDLabelsParams{ - timeout: timeout, - } -} - -// NewPatchEndpointIDLabelsParamsWithContext creates a new PatchEndpointIDLabelsParams object -// with the ability to set a context for a request. 
-func NewPatchEndpointIDLabelsParamsWithContext(ctx context.Context) *PatchEndpointIDLabelsParams { - return &PatchEndpointIDLabelsParams{ - Context: ctx, - } -} - -// NewPatchEndpointIDLabelsParamsWithHTTPClient creates a new PatchEndpointIDLabelsParams object -// with the ability to set a custom HTTPClient for a request. -func NewPatchEndpointIDLabelsParamsWithHTTPClient(client *http.Client) *PatchEndpointIDLabelsParams { - return &PatchEndpointIDLabelsParams{ - HTTPClient: client, - } -} - -/* -PatchEndpointIDLabelsParams contains all the parameters to send to the API endpoint - - for the patch endpoint ID labels operation. - - Typically these are written to a http.Request. -*/ -type PatchEndpointIDLabelsParams struct { - - // Configuration. - Configuration *models.LabelConfigurationSpec - - /* ID. - - String describing an endpoint with the format ``[prefix:]id``. If no prefix - is specified, a prefix of ``cilium-local:`` is assumed. Not all endpoints - will be addressable by all endpoint ID prefixes with the exception of the - local Cilium UUID which is assigned to all endpoints. - - Supported endpoint id prefixes: - - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595 - - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343 - - container-id: Container runtime ID, e.g. container-id:22222 - - container-name: Container name, e.g. container-name:foobar - - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar - - docker-endpoint: Docker libnetwork endpoint ID, e.g. docker-endpoint:4444 - - */ - ID string - - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the patch endpoint ID labels params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *PatchEndpointIDLabelsParams) WithDefaults() *PatchEndpointIDLabelsParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the patch endpoint ID labels params (not the query body). -// -// All values with no default are reset to their zero value. 
-func (o *PatchEndpointIDLabelsParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the patch endpoint ID labels params -func (o *PatchEndpointIDLabelsParams) WithTimeout(timeout time.Duration) *PatchEndpointIDLabelsParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the patch endpoint ID labels params -func (o *PatchEndpointIDLabelsParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the patch endpoint ID labels params -func (o *PatchEndpointIDLabelsParams) WithContext(ctx context.Context) *PatchEndpointIDLabelsParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the patch endpoint ID labels params -func (o *PatchEndpointIDLabelsParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the patch endpoint ID labels params -func (o *PatchEndpointIDLabelsParams) WithHTTPClient(client *http.Client) *PatchEndpointIDLabelsParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the patch endpoint ID labels params -func (o *PatchEndpointIDLabelsParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WithConfiguration adds the configuration to the patch endpoint ID labels params -func (o *PatchEndpointIDLabelsParams) WithConfiguration(configuration *models.LabelConfigurationSpec) *PatchEndpointIDLabelsParams { - o.SetConfiguration(configuration) - return o -} - -// SetConfiguration adds the configuration to the patch endpoint ID labels params -func (o *PatchEndpointIDLabelsParams) SetConfiguration(configuration *models.LabelConfigurationSpec) { - o.Configuration = configuration -} - -// WithID adds the id to the patch endpoint ID labels params -func (o *PatchEndpointIDLabelsParams) WithID(id string) *PatchEndpointIDLabelsParams { - o.SetID(id) - return o -} - -// SetID adds the id to the patch endpoint ID labels params -func (o *PatchEndpointIDLabelsParams) SetID(id string) { - o.ID = id -} - -// WriteToRequest writes these params to a swagger request -func (o *PatchEndpointIDLabelsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - if o.Configuration != nil { - if err := r.SetBodyParam(o.Configuration); err != nil { - return err - } - } - - // path param id - if err := r.SetPathParam("id", o.ID); err != nil { - return err - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_labels_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_labels_responses.go deleted file mode 100644 index 1aac9bc79..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_labels_responses.go +++ /dev/null @@ -1,270 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package endpoint - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// PatchEndpointIDLabelsReader is a Reader for the PatchEndpointIDLabels structure. 
-type PatchEndpointIDLabelsReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. -func (o *PatchEndpointIDLabelsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewPatchEndpointIDLabelsOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 404: - result := NewPatchEndpointIDLabelsNotFound() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 429: - result := NewPatchEndpointIDLabelsTooManyRequests() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 500: - result := NewPatchEndpointIDLabelsUpdateFailed() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewPatchEndpointIDLabelsOK creates a PatchEndpointIDLabelsOK with default headers values -func NewPatchEndpointIDLabelsOK() *PatchEndpointIDLabelsOK { - return &PatchEndpointIDLabelsOK{} -} - -/* -PatchEndpointIDLabelsOK describes a response with status code 200, with default header values. - -Success -*/ -type PatchEndpointIDLabelsOK struct { -} - -// IsSuccess returns true when this patch endpoint Id labels o k response has a 2xx status code -func (o *PatchEndpointIDLabelsOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this patch endpoint Id labels o k response has a 3xx status code -func (o *PatchEndpointIDLabelsOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this patch endpoint Id labels o k response has a 4xx status code -func (o *PatchEndpointIDLabelsOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this patch endpoint Id labels o k response has a 5xx status code -func (o *PatchEndpointIDLabelsOK) IsServerError() bool { - return false -} - -// IsCode returns true when this patch endpoint Id labels o k response a status code equal to that given -func (o *PatchEndpointIDLabelsOK) IsCode(code int) bool { - return code == 200 -} - -func (o *PatchEndpointIDLabelsOK) Error() string { - return fmt.Sprintf("[PATCH /endpoint/{id}/labels][%d] patchEndpointIdLabelsOK ", 200) -} - -func (o *PatchEndpointIDLabelsOK) String() string { - return fmt.Sprintf("[PATCH /endpoint/{id}/labels][%d] patchEndpointIdLabelsOK ", 200) -} - -func (o *PatchEndpointIDLabelsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} - -// NewPatchEndpointIDLabelsNotFound creates a PatchEndpointIDLabelsNotFound with default headers values -func NewPatchEndpointIDLabelsNotFound() *PatchEndpointIDLabelsNotFound { - return &PatchEndpointIDLabelsNotFound{} -} - -/* -PatchEndpointIDLabelsNotFound describes a response with status code 404, with default header values. 
- -Endpoint not found -*/ -type PatchEndpointIDLabelsNotFound struct { -} - -// IsSuccess returns true when this patch endpoint Id labels not found response has a 2xx status code -func (o *PatchEndpointIDLabelsNotFound) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this patch endpoint Id labels not found response has a 3xx status code -func (o *PatchEndpointIDLabelsNotFound) IsRedirect() bool { - return false -} - -// IsClientError returns true when this patch endpoint Id labels not found response has a 4xx status code -func (o *PatchEndpointIDLabelsNotFound) IsClientError() bool { - return true -} - -// IsServerError returns true when this patch endpoint Id labels not found response has a 5xx status code -func (o *PatchEndpointIDLabelsNotFound) IsServerError() bool { - return false -} - -// IsCode returns true when this patch endpoint Id labels not found response a status code equal to that given -func (o *PatchEndpointIDLabelsNotFound) IsCode(code int) bool { - return code == 404 -} - -func (o *PatchEndpointIDLabelsNotFound) Error() string { - return fmt.Sprintf("[PATCH /endpoint/{id}/labels][%d] patchEndpointIdLabelsNotFound ", 404) -} - -func (o *PatchEndpointIDLabelsNotFound) String() string { - return fmt.Sprintf("[PATCH /endpoint/{id}/labels][%d] patchEndpointIdLabelsNotFound ", 404) -} - -func (o *PatchEndpointIDLabelsNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} - -// NewPatchEndpointIDLabelsTooManyRequests creates a PatchEndpointIDLabelsTooManyRequests with default headers values -func NewPatchEndpointIDLabelsTooManyRequests() *PatchEndpointIDLabelsTooManyRequests { - return &PatchEndpointIDLabelsTooManyRequests{} -} - -/* -PatchEndpointIDLabelsTooManyRequests describes a response with status code 429, with default header values. 
- -Rate-limiting too many requests in the given time frame -*/ -type PatchEndpointIDLabelsTooManyRequests struct { -} - -// IsSuccess returns true when this patch endpoint Id labels too many requests response has a 2xx status code -func (o *PatchEndpointIDLabelsTooManyRequests) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this patch endpoint Id labels too many requests response has a 3xx status code -func (o *PatchEndpointIDLabelsTooManyRequests) IsRedirect() bool { - return false -} - -// IsClientError returns true when this patch endpoint Id labels too many requests response has a 4xx status code -func (o *PatchEndpointIDLabelsTooManyRequests) IsClientError() bool { - return true -} - -// IsServerError returns true when this patch endpoint Id labels too many requests response has a 5xx status code -func (o *PatchEndpointIDLabelsTooManyRequests) IsServerError() bool { - return false -} - -// IsCode returns true when this patch endpoint Id labels too many requests response a status code equal to that given -func (o *PatchEndpointIDLabelsTooManyRequests) IsCode(code int) bool { - return code == 429 -} - -func (o *PatchEndpointIDLabelsTooManyRequests) Error() string { - return fmt.Sprintf("[PATCH /endpoint/{id}/labels][%d] patchEndpointIdLabelsTooManyRequests ", 429) -} - -func (o *PatchEndpointIDLabelsTooManyRequests) String() string { - return fmt.Sprintf("[PATCH /endpoint/{id}/labels][%d] patchEndpointIdLabelsTooManyRequests ", 429) -} - -func (o *PatchEndpointIDLabelsTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} - -// NewPatchEndpointIDLabelsUpdateFailed creates a PatchEndpointIDLabelsUpdateFailed with default headers values -func NewPatchEndpointIDLabelsUpdateFailed() *PatchEndpointIDLabelsUpdateFailed { - return &PatchEndpointIDLabelsUpdateFailed{} -} - -/* -PatchEndpointIDLabelsUpdateFailed describes a response with status code 500, with default header values. 
- -Error while updating labels -*/ -type PatchEndpointIDLabelsUpdateFailed struct { - Payload models.Error -} - -// IsSuccess returns true when this patch endpoint Id labels update failed response has a 2xx status code -func (o *PatchEndpointIDLabelsUpdateFailed) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this patch endpoint Id labels update failed response has a 3xx status code -func (o *PatchEndpointIDLabelsUpdateFailed) IsRedirect() bool { - return false -} - -// IsClientError returns true when this patch endpoint Id labels update failed response has a 4xx status code -func (o *PatchEndpointIDLabelsUpdateFailed) IsClientError() bool { - return false -} - -// IsServerError returns true when this patch endpoint Id labels update failed response has a 5xx status code -func (o *PatchEndpointIDLabelsUpdateFailed) IsServerError() bool { - return true -} - -// IsCode returns true when this patch endpoint Id labels update failed response a status code equal to that given -func (o *PatchEndpointIDLabelsUpdateFailed) IsCode(code int) bool { - return code == 500 -} - -func (o *PatchEndpointIDLabelsUpdateFailed) Error() string { - return fmt.Sprintf("[PATCH /endpoint/{id}/labels][%d] patchEndpointIdLabelsUpdateFailed %+v", 500, o.Payload) -} - -func (o *PatchEndpointIDLabelsUpdateFailed) String() string { - return fmt.Sprintf("[PATCH /endpoint/{id}/labels][%d] patchEndpointIdLabelsUpdateFailed %+v", 500, o.Payload) -} - -func (o *PatchEndpointIDLabelsUpdateFailed) GetPayload() models.Error { - return o.Payload -} - -func (o *PatchEndpointIDLabelsUpdateFailed) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_parameters.go deleted file mode 100644 index fc8495b4c..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_parameters.go +++ /dev/null @@ -1,187 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package endpoint - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// NewPatchEndpointIDParams creates a new PatchEndpointIDParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewPatchEndpointIDParams() *PatchEndpointIDParams { - return &PatchEndpointIDParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewPatchEndpointIDParamsWithTimeout creates a new PatchEndpointIDParams object -// with the ability to set a timeout on a request. 
-func NewPatchEndpointIDParamsWithTimeout(timeout time.Duration) *PatchEndpointIDParams { - return &PatchEndpointIDParams{ - timeout: timeout, - } -} - -// NewPatchEndpointIDParamsWithContext creates a new PatchEndpointIDParams object -// with the ability to set a context for a request. -func NewPatchEndpointIDParamsWithContext(ctx context.Context) *PatchEndpointIDParams { - return &PatchEndpointIDParams{ - Context: ctx, - } -} - -// NewPatchEndpointIDParamsWithHTTPClient creates a new PatchEndpointIDParams object -// with the ability to set a custom HTTPClient for a request. -func NewPatchEndpointIDParamsWithHTTPClient(client *http.Client) *PatchEndpointIDParams { - return &PatchEndpointIDParams{ - HTTPClient: client, - } -} - -/* -PatchEndpointIDParams contains all the parameters to send to the API endpoint - - for the patch endpoint ID operation. - - Typically these are written to a http.Request. -*/ -type PatchEndpointIDParams struct { - - // Endpoint. - Endpoint *models.EndpointChangeRequest - - /* ID. - - String describing an endpoint with the format ``[prefix:]id``. If no prefix - is specified, a prefix of ``cilium-local:`` is assumed. Not all endpoints - will be addressable by all endpoint ID prefixes with the exception of the - local Cilium UUID which is assigned to all endpoints. - - Supported endpoint id prefixes: - - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595 - - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343 - - container-id: Container runtime ID, e.g. container-id:22222 - - container-name: Container name, e.g. container-name:foobar - - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar - - docker-endpoint: Docker libnetwork endpoint ID, e.g. docker-endpoint:4444 - - */ - ID string - - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the patch endpoint ID params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *PatchEndpointIDParams) WithDefaults() *PatchEndpointIDParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the patch endpoint ID params (not the query body). -// -// All values with no default are reset to their zero value. 
-func (o *PatchEndpointIDParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the patch endpoint ID params -func (o *PatchEndpointIDParams) WithTimeout(timeout time.Duration) *PatchEndpointIDParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the patch endpoint ID params -func (o *PatchEndpointIDParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the patch endpoint ID params -func (o *PatchEndpointIDParams) WithContext(ctx context.Context) *PatchEndpointIDParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the patch endpoint ID params -func (o *PatchEndpointIDParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the patch endpoint ID params -func (o *PatchEndpointIDParams) WithHTTPClient(client *http.Client) *PatchEndpointIDParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the patch endpoint ID params -func (o *PatchEndpointIDParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WithEndpoint adds the endpoint to the patch endpoint ID params -func (o *PatchEndpointIDParams) WithEndpoint(endpoint *models.EndpointChangeRequest) *PatchEndpointIDParams { - o.SetEndpoint(endpoint) - return o -} - -// SetEndpoint adds the endpoint to the patch endpoint ID params -func (o *PatchEndpointIDParams) SetEndpoint(endpoint *models.EndpointChangeRequest) { - o.Endpoint = endpoint -} - -// WithID adds the id to the patch endpoint ID params -func (o *PatchEndpointIDParams) WithID(id string) *PatchEndpointIDParams { - o.SetID(id) - return o -} - -// SetID adds the id to the patch endpoint ID params -func (o *PatchEndpointIDParams) SetID(id string) { - o.ID = id -} - -// WriteToRequest writes these params to a swagger request -func (o *PatchEndpointIDParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - if o.Endpoint != nil { - if err := r.SetBodyParam(o.Endpoint); err != nil { - return err - } - } - - // path param id - if err := r.SetPathParam("id", o.ID); err != nil { - return err - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_responses.go deleted file mode 100644 index e2d4528b9..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/patch_endpoint_id_responses.go +++ /dev/null @@ -1,337 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package endpoint - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// PatchEndpointIDReader is a Reader for the PatchEndpointID structure. -type PatchEndpointIDReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. 
-func (o *PatchEndpointIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewPatchEndpointIDOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 400: - result := NewPatchEndpointIDInvalid() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 404: - result := NewPatchEndpointIDNotFound() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 429: - result := NewPatchEndpointIDTooManyRequests() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 500: - result := NewPatchEndpointIDFailed() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewPatchEndpointIDOK creates a PatchEndpointIDOK with default headers values -func NewPatchEndpointIDOK() *PatchEndpointIDOK { - return &PatchEndpointIDOK{} -} - -/* -PatchEndpointIDOK describes a response with status code 200, with default header values. - -Success -*/ -type PatchEndpointIDOK struct { -} - -// IsSuccess returns true when this patch endpoint Id o k response has a 2xx status code -func (o *PatchEndpointIDOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this patch endpoint Id o k response has a 3xx status code -func (o *PatchEndpointIDOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this patch endpoint Id o k response has a 4xx status code -func (o *PatchEndpointIDOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this patch endpoint Id o k response has a 5xx status code -func (o *PatchEndpointIDOK) IsServerError() bool { - return false -} - -// IsCode returns true when this patch endpoint Id o k response a status code equal to that given -func (o *PatchEndpointIDOK) IsCode(code int) bool { - return code == 200 -} - -func (o *PatchEndpointIDOK) Error() string { - return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdOK ", 200) -} - -func (o *PatchEndpointIDOK) String() string { - return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdOK ", 200) -} - -func (o *PatchEndpointIDOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} - -// NewPatchEndpointIDInvalid creates a PatchEndpointIDInvalid with default headers values -func NewPatchEndpointIDInvalid() *PatchEndpointIDInvalid { - return &PatchEndpointIDInvalid{} -} - -/* -PatchEndpointIDInvalid describes a response with status code 400, with default header values. 
- -Invalid modify endpoint request -*/ -type PatchEndpointIDInvalid struct { - Payload models.Error -} - -// IsSuccess returns true when this patch endpoint Id invalid response has a 2xx status code -func (o *PatchEndpointIDInvalid) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this patch endpoint Id invalid response has a 3xx status code -func (o *PatchEndpointIDInvalid) IsRedirect() bool { - return false -} - -// IsClientError returns true when this patch endpoint Id invalid response has a 4xx status code -func (o *PatchEndpointIDInvalid) IsClientError() bool { - return true -} - -// IsServerError returns true when this patch endpoint Id invalid response has a 5xx status code -func (o *PatchEndpointIDInvalid) IsServerError() bool { - return false -} - -// IsCode returns true when this patch endpoint Id invalid response a status code equal to that given -func (o *PatchEndpointIDInvalid) IsCode(code int) bool { - return code == 400 -} - -func (o *PatchEndpointIDInvalid) Error() string { - return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdInvalid %+v", 400, o.Payload) -} - -func (o *PatchEndpointIDInvalid) String() string { - return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdInvalid %+v", 400, o.Payload) -} - -func (o *PatchEndpointIDInvalid) GetPayload() models.Error { - return o.Payload -} - -func (o *PatchEndpointIDInvalid) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewPatchEndpointIDNotFound creates a PatchEndpointIDNotFound with default headers values -func NewPatchEndpointIDNotFound() *PatchEndpointIDNotFound { - return &PatchEndpointIDNotFound{} -} - -/* -PatchEndpointIDNotFound describes a response with status code 404, with default header values. 
- -Endpoint does not exist -*/ -type PatchEndpointIDNotFound struct { -} - -// IsSuccess returns true when this patch endpoint Id not found response has a 2xx status code -func (o *PatchEndpointIDNotFound) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this patch endpoint Id not found response has a 3xx status code -func (o *PatchEndpointIDNotFound) IsRedirect() bool { - return false -} - -// IsClientError returns true when this patch endpoint Id not found response has a 4xx status code -func (o *PatchEndpointIDNotFound) IsClientError() bool { - return true -} - -// IsServerError returns true when this patch endpoint Id not found response has a 5xx status code -func (o *PatchEndpointIDNotFound) IsServerError() bool { - return false -} - -// IsCode returns true when this patch endpoint Id not found response a status code equal to that given -func (o *PatchEndpointIDNotFound) IsCode(code int) bool { - return code == 404 -} - -func (o *PatchEndpointIDNotFound) Error() string { - return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdNotFound ", 404) -} - -func (o *PatchEndpointIDNotFound) String() string { - return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdNotFound ", 404) -} - -func (o *PatchEndpointIDNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} - -// NewPatchEndpointIDTooManyRequests creates a PatchEndpointIDTooManyRequests with default headers values -func NewPatchEndpointIDTooManyRequests() *PatchEndpointIDTooManyRequests { - return &PatchEndpointIDTooManyRequests{} -} - -/* -PatchEndpointIDTooManyRequests describes a response with status code 429, with default header values. - -Rate-limiting too many requests in the given time frame -*/ -type PatchEndpointIDTooManyRequests struct { -} - -// IsSuccess returns true when this patch endpoint Id too many requests response has a 2xx status code -func (o *PatchEndpointIDTooManyRequests) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this patch endpoint Id too many requests response has a 3xx status code -func (o *PatchEndpointIDTooManyRequests) IsRedirect() bool { - return false -} - -// IsClientError returns true when this patch endpoint Id too many requests response has a 4xx status code -func (o *PatchEndpointIDTooManyRequests) IsClientError() bool { - return true -} - -// IsServerError returns true when this patch endpoint Id too many requests response has a 5xx status code -func (o *PatchEndpointIDTooManyRequests) IsServerError() bool { - return false -} - -// IsCode returns true when this patch endpoint Id too many requests response a status code equal to that given -func (o *PatchEndpointIDTooManyRequests) IsCode(code int) bool { - return code == 429 -} - -func (o *PatchEndpointIDTooManyRequests) Error() string { - return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdTooManyRequests ", 429) -} - -func (o *PatchEndpointIDTooManyRequests) String() string { - return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdTooManyRequests ", 429) -} - -func (o *PatchEndpointIDTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} - -// NewPatchEndpointIDFailed creates a PatchEndpointIDFailed with default headers values -func NewPatchEndpointIDFailed() *PatchEndpointIDFailed { - return &PatchEndpointIDFailed{} -} - -/* -PatchEndpointIDFailed describes a response with status code 500, with 
default header values. - -Endpoint update failed -*/ -type PatchEndpointIDFailed struct { - Payload models.Error -} - -// IsSuccess returns true when this patch endpoint Id failed response has a 2xx status code -func (o *PatchEndpointIDFailed) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this patch endpoint Id failed response has a 3xx status code -func (o *PatchEndpointIDFailed) IsRedirect() bool { - return false -} - -// IsClientError returns true when this patch endpoint Id failed response has a 4xx status code -func (o *PatchEndpointIDFailed) IsClientError() bool { - return false -} - -// IsServerError returns true when this patch endpoint Id failed response has a 5xx status code -func (o *PatchEndpointIDFailed) IsServerError() bool { - return true -} - -// IsCode returns true when this patch endpoint Id failed response a status code equal to that given -func (o *PatchEndpointIDFailed) IsCode(code int) bool { - return code == 500 -} - -func (o *PatchEndpointIDFailed) Error() string { - return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdFailed %+v", 500, o.Payload) -} - -func (o *PatchEndpointIDFailed) String() string { - return fmt.Sprintf("[PATCH /endpoint/{id}][%d] patchEndpointIdFailed %+v", 500, o.Payload) -} - -func (o *PatchEndpointIDFailed) GetPayload() models.Error { - return o.Payload -} - -func (o *PatchEndpointIDFailed) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/put_endpoint_id_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/put_endpoint_id_parameters.go deleted file mode 100644 index 9692c0013..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/put_endpoint_id_parameters.go +++ /dev/null @@ -1,187 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package endpoint - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// NewPutEndpointIDParams creates a new PutEndpointIDParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewPutEndpointIDParams() *PutEndpointIDParams { - return &PutEndpointIDParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewPutEndpointIDParamsWithTimeout creates a new PutEndpointIDParams object -// with the ability to set a timeout on a request. -func NewPutEndpointIDParamsWithTimeout(timeout time.Duration) *PutEndpointIDParams { - return &PutEndpointIDParams{ - timeout: timeout, - } -} - -// NewPutEndpointIDParamsWithContext creates a new PutEndpointIDParams object -// with the ability to set a context for a request. 
-func NewPutEndpointIDParamsWithContext(ctx context.Context) *PutEndpointIDParams { - return &PutEndpointIDParams{ - Context: ctx, - } -} - -// NewPutEndpointIDParamsWithHTTPClient creates a new PutEndpointIDParams object -// with the ability to set a custom HTTPClient for a request. -func NewPutEndpointIDParamsWithHTTPClient(client *http.Client) *PutEndpointIDParams { - return &PutEndpointIDParams{ - HTTPClient: client, - } -} - -/* -PutEndpointIDParams contains all the parameters to send to the API endpoint - - for the put endpoint ID operation. - - Typically these are written to a http.Request. -*/ -type PutEndpointIDParams struct { - - // Endpoint. - Endpoint *models.EndpointChangeRequest - - /* ID. - - String describing an endpoint with the format ``[prefix:]id``. If no prefix - is specified, a prefix of ``cilium-local:`` is assumed. Not all endpoints - will be addressable by all endpoint ID prefixes with the exception of the - local Cilium UUID which is assigned to all endpoints. - - Supported endpoint id prefixes: - - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595 - - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343 - - container-id: Container runtime ID, e.g. container-id:22222 - - container-name: Container name, e.g. container-name:foobar - - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar - - docker-endpoint: Docker libnetwork endpoint ID, e.g. docker-endpoint:4444 - - */ - ID string - - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the put endpoint ID params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *PutEndpointIDParams) WithDefaults() *PutEndpointIDParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the put endpoint ID params (not the query body). -// -// All values with no default are reset to their zero value. 
-func (o *PutEndpointIDParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the put endpoint ID params -func (o *PutEndpointIDParams) WithTimeout(timeout time.Duration) *PutEndpointIDParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the put endpoint ID params -func (o *PutEndpointIDParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the put endpoint ID params -func (o *PutEndpointIDParams) WithContext(ctx context.Context) *PutEndpointIDParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the put endpoint ID params -func (o *PutEndpointIDParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the put endpoint ID params -func (o *PutEndpointIDParams) WithHTTPClient(client *http.Client) *PutEndpointIDParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the put endpoint ID params -func (o *PutEndpointIDParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WithEndpoint adds the endpoint to the put endpoint ID params -func (o *PutEndpointIDParams) WithEndpoint(endpoint *models.EndpointChangeRequest) *PutEndpointIDParams { - o.SetEndpoint(endpoint) - return o -} - -// SetEndpoint adds the endpoint to the put endpoint ID params -func (o *PutEndpointIDParams) SetEndpoint(endpoint *models.EndpointChangeRequest) { - o.Endpoint = endpoint -} - -// WithID adds the id to the put endpoint ID params -func (o *PutEndpointIDParams) WithID(id string) *PutEndpointIDParams { - o.SetID(id) - return o -} - -// SetID adds the id to the put endpoint ID params -func (o *PutEndpointIDParams) SetID(id string) { - o.ID = id -} - -// WriteToRequest writes these params to a swagger request -func (o *PutEndpointIDParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - if o.Endpoint != nil { - if err := r.SetBodyParam(o.Endpoint); err != nil { - return err - } - } - - // path param id - if err := r.SetPathParam("id", o.ID); err != nil { - return err - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/put_endpoint_id_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/endpoint/put_endpoint_id_responses.go deleted file mode 100644 index 502daa86a..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/endpoint/put_endpoint_id_responses.go +++ /dev/null @@ -1,337 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package endpoint - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// PutEndpointIDReader is a Reader for the PutEndpointID structure. -type PutEndpointIDReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. 
-func (o *PutEndpointIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 201: - result := NewPutEndpointIDCreated() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 400: - result := NewPutEndpointIDInvalid() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 409: - result := NewPutEndpointIDExists() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 429: - result := NewPutEndpointIDTooManyRequests() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 500: - result := NewPutEndpointIDFailed() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewPutEndpointIDCreated creates a PutEndpointIDCreated with default headers values -func NewPutEndpointIDCreated() *PutEndpointIDCreated { - return &PutEndpointIDCreated{} -} - -/* -PutEndpointIDCreated describes a response with status code 201, with default header values. - -Created -*/ -type PutEndpointIDCreated struct { -} - -// IsSuccess returns true when this put endpoint Id created response has a 2xx status code -func (o *PutEndpointIDCreated) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this put endpoint Id created response has a 3xx status code -func (o *PutEndpointIDCreated) IsRedirect() bool { - return false -} - -// IsClientError returns true when this put endpoint Id created response has a 4xx status code -func (o *PutEndpointIDCreated) IsClientError() bool { - return false -} - -// IsServerError returns true when this put endpoint Id created response has a 5xx status code -func (o *PutEndpointIDCreated) IsServerError() bool { - return false -} - -// IsCode returns true when this put endpoint Id created response a status code equal to that given -func (o *PutEndpointIDCreated) IsCode(code int) bool { - return code == 201 -} - -func (o *PutEndpointIDCreated) Error() string { - return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdCreated ", 201) -} - -func (o *PutEndpointIDCreated) String() string { - return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdCreated ", 201) -} - -func (o *PutEndpointIDCreated) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} - -// NewPutEndpointIDInvalid creates a PutEndpointIDInvalid with default headers values -func NewPutEndpointIDInvalid() *PutEndpointIDInvalid { - return &PutEndpointIDInvalid{} -} - -/* -PutEndpointIDInvalid describes a response with status code 400, with default header values. 
- -Invalid endpoint in request -*/ -type PutEndpointIDInvalid struct { - Payload models.Error -} - -// IsSuccess returns true when this put endpoint Id invalid response has a 2xx status code -func (o *PutEndpointIDInvalid) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this put endpoint Id invalid response has a 3xx status code -func (o *PutEndpointIDInvalid) IsRedirect() bool { - return false -} - -// IsClientError returns true when this put endpoint Id invalid response has a 4xx status code -func (o *PutEndpointIDInvalid) IsClientError() bool { - return true -} - -// IsServerError returns true when this put endpoint Id invalid response has a 5xx status code -func (o *PutEndpointIDInvalid) IsServerError() bool { - return false -} - -// IsCode returns true when this put endpoint Id invalid response a status code equal to that given -func (o *PutEndpointIDInvalid) IsCode(code int) bool { - return code == 400 -} - -func (o *PutEndpointIDInvalid) Error() string { - return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdInvalid %+v", 400, o.Payload) -} - -func (o *PutEndpointIDInvalid) String() string { - return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdInvalid %+v", 400, o.Payload) -} - -func (o *PutEndpointIDInvalid) GetPayload() models.Error { - return o.Payload -} - -func (o *PutEndpointIDInvalid) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewPutEndpointIDExists creates a PutEndpointIDExists with default headers values -func NewPutEndpointIDExists() *PutEndpointIDExists { - return &PutEndpointIDExists{} -} - -/* -PutEndpointIDExists describes a response with status code 409, with default header values. - -Endpoint already exists -*/ -type PutEndpointIDExists struct { -} - -// IsSuccess returns true when this put endpoint Id exists response has a 2xx status code -func (o *PutEndpointIDExists) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this put endpoint Id exists response has a 3xx status code -func (o *PutEndpointIDExists) IsRedirect() bool { - return false -} - -// IsClientError returns true when this put endpoint Id exists response has a 4xx status code -func (o *PutEndpointIDExists) IsClientError() bool { - return true -} - -// IsServerError returns true when this put endpoint Id exists response has a 5xx status code -func (o *PutEndpointIDExists) IsServerError() bool { - return false -} - -// IsCode returns true when this put endpoint Id exists response a status code equal to that given -func (o *PutEndpointIDExists) IsCode(code int) bool { - return code == 409 -} - -func (o *PutEndpointIDExists) Error() string { - return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdExists ", 409) -} - -func (o *PutEndpointIDExists) String() string { - return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdExists ", 409) -} - -func (o *PutEndpointIDExists) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} - -// NewPutEndpointIDTooManyRequests creates a PutEndpointIDTooManyRequests with default headers values -func NewPutEndpointIDTooManyRequests() *PutEndpointIDTooManyRequests { - return &PutEndpointIDTooManyRequests{} -} - -/* -PutEndpointIDTooManyRequests describes a response with status code 429, with default header values. 
- -Rate-limiting too many requests in the given time frame -*/ -type PutEndpointIDTooManyRequests struct { -} - -// IsSuccess returns true when this put endpoint Id too many requests response has a 2xx status code -func (o *PutEndpointIDTooManyRequests) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this put endpoint Id too many requests response has a 3xx status code -func (o *PutEndpointIDTooManyRequests) IsRedirect() bool { - return false -} - -// IsClientError returns true when this put endpoint Id too many requests response has a 4xx status code -func (o *PutEndpointIDTooManyRequests) IsClientError() bool { - return true -} - -// IsServerError returns true when this put endpoint Id too many requests response has a 5xx status code -func (o *PutEndpointIDTooManyRequests) IsServerError() bool { - return false -} - -// IsCode returns true when this put endpoint Id too many requests response a status code equal to that given -func (o *PutEndpointIDTooManyRequests) IsCode(code int) bool { - return code == 429 -} - -func (o *PutEndpointIDTooManyRequests) Error() string { - return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdTooManyRequests ", 429) -} - -func (o *PutEndpointIDTooManyRequests) String() string { - return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdTooManyRequests ", 429) -} - -func (o *PutEndpointIDTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} - -// NewPutEndpointIDFailed creates a PutEndpointIDFailed with default headers values -func NewPutEndpointIDFailed() *PutEndpointIDFailed { - return &PutEndpointIDFailed{} -} - -/* -PutEndpointIDFailed describes a response with status code 500, with default header values. 
- -Endpoint creation failed -*/ -type PutEndpointIDFailed struct { - Payload models.Error -} - -// IsSuccess returns true when this put endpoint Id failed response has a 2xx status code -func (o *PutEndpointIDFailed) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this put endpoint Id failed response has a 3xx status code -func (o *PutEndpointIDFailed) IsRedirect() bool { - return false -} - -// IsClientError returns true when this put endpoint Id failed response has a 4xx status code -func (o *PutEndpointIDFailed) IsClientError() bool { - return false -} - -// IsServerError returns true when this put endpoint Id failed response has a 5xx status code -func (o *PutEndpointIDFailed) IsServerError() bool { - return true -} - -// IsCode returns true when this put endpoint Id failed response a status code equal to that given -func (o *PutEndpointIDFailed) IsCode(code int) bool { - return code == 500 -} - -func (o *PutEndpointIDFailed) Error() string { - return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdFailed %+v", 500, o.Payload) -} - -func (o *PutEndpointIDFailed) String() string { - return fmt.Sprintf("[PUT /endpoint/{id}][%d] putEndpointIdFailed %+v", 500, o.Payload) -} - -func (o *PutEndpointIDFailed) GetPayload() models.Error { - return o.Payload -} - -func (o *PutEndpointIDFailed) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/ipam/delete_ipam_ip_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/ipam/delete_ipam_ip_parameters.go deleted file mode 100644 index 25fcac67f..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/ipam/delete_ipam_ip_parameters.go +++ /dev/null @@ -1,185 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package ipam - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" -) - -// NewDeleteIpamIPParams creates a new DeleteIpamIPParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewDeleteIpamIPParams() *DeleteIpamIPParams { - return &DeleteIpamIPParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewDeleteIpamIPParamsWithTimeout creates a new DeleteIpamIPParams object -// with the ability to set a timeout on a request. -func NewDeleteIpamIPParamsWithTimeout(timeout time.Duration) *DeleteIpamIPParams { - return &DeleteIpamIPParams{ - timeout: timeout, - } -} - -// NewDeleteIpamIPParamsWithContext creates a new DeleteIpamIPParams object -// with the ability to set a context for a request. -func NewDeleteIpamIPParamsWithContext(ctx context.Context) *DeleteIpamIPParams { - return &DeleteIpamIPParams{ - Context: ctx, - } -} - -// NewDeleteIpamIPParamsWithHTTPClient creates a new DeleteIpamIPParams object -// with the ability to set a custom HTTPClient for a request. 
-func NewDeleteIpamIPParamsWithHTTPClient(client *http.Client) *DeleteIpamIPParams { - return &DeleteIpamIPParams{ - HTTPClient: client, - } -} - -/* -DeleteIpamIPParams contains all the parameters to send to the API endpoint - - for the delete ipam IP operation. - - Typically these are written to a http.Request. -*/ -type DeleteIpamIPParams struct { - - /* IP. - - IP address or owner name - */ - IP string - - // Pool. - Pool *string - - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the delete ipam IP params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *DeleteIpamIPParams) WithDefaults() *DeleteIpamIPParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the delete ipam IP params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *DeleteIpamIPParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the delete ipam IP params -func (o *DeleteIpamIPParams) WithTimeout(timeout time.Duration) *DeleteIpamIPParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the delete ipam IP params -func (o *DeleteIpamIPParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the delete ipam IP params -func (o *DeleteIpamIPParams) WithContext(ctx context.Context) *DeleteIpamIPParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the delete ipam IP params -func (o *DeleteIpamIPParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the delete ipam IP params -func (o *DeleteIpamIPParams) WithHTTPClient(client *http.Client) *DeleteIpamIPParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the delete ipam IP params -func (o *DeleteIpamIPParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WithIP adds the ip to the delete ipam IP params -func (o *DeleteIpamIPParams) WithIP(ip string) *DeleteIpamIPParams { - o.SetIP(ip) - return o -} - -// SetIP adds the ip to the delete ipam IP params -func (o *DeleteIpamIPParams) SetIP(ip string) { - o.IP = ip -} - -// WithPool adds the pool to the delete ipam IP params -func (o *DeleteIpamIPParams) WithPool(pool *string) *DeleteIpamIPParams { - o.SetPool(pool) - return o -} - -// SetPool adds the pool to the delete ipam IP params -func (o *DeleteIpamIPParams) SetPool(pool *string) { - o.Pool = pool -} - -// WriteToRequest writes these params to a swagger request -func (o *DeleteIpamIPParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - - // path param ip - if err := r.SetPathParam("ip", o.IP); err != nil { - return err - } - - if o.Pool != nil { - - // query param pool - var qrPool string - - if o.Pool != nil { - qrPool = *o.Pool - } - qPool := qrPool - if qPool != "" { - - if err := r.SetQueryParam("pool", qPool); err != nil { - return err - } - } - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/ipam/delete_ipam_ip_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/ipam/delete_ipam_ip_responses.go deleted file mode 100644 index 8f0e9acc0..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/ipam/delete_ipam_ip_responses.go +++ /dev/null @@ -1,327 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package ipam - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// DeleteIpamIPReader is a Reader for the DeleteIpamIP structure. -type DeleteIpamIPReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. -func (o *DeleteIpamIPReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewDeleteIpamIPOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 400: - result := NewDeleteIpamIPInvalid() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 404: - result := NewDeleteIpamIPNotFound() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 500: - result := NewDeleteIpamIPFailure() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 501: - result := NewDeleteIpamIPDisabled() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewDeleteIpamIPOK creates a DeleteIpamIPOK with default headers values -func NewDeleteIpamIPOK() *DeleteIpamIPOK { - return &DeleteIpamIPOK{} -} - -/* -DeleteIpamIPOK describes a response with status code 200, with default header values. 
- -Success -*/ -type DeleteIpamIPOK struct { -} - -// IsSuccess returns true when this delete ipam Ip o k response has a 2xx status code -func (o *DeleteIpamIPOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this delete ipam Ip o k response has a 3xx status code -func (o *DeleteIpamIPOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this delete ipam Ip o k response has a 4xx status code -func (o *DeleteIpamIPOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this delete ipam Ip o k response has a 5xx status code -func (o *DeleteIpamIPOK) IsServerError() bool { - return false -} - -// IsCode returns true when this delete ipam Ip o k response a status code equal to that given -func (o *DeleteIpamIPOK) IsCode(code int) bool { - return code == 200 -} - -func (o *DeleteIpamIPOK) Error() string { - return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpOK ", 200) -} - -func (o *DeleteIpamIPOK) String() string { - return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpOK ", 200) -} - -func (o *DeleteIpamIPOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} - -// NewDeleteIpamIPInvalid creates a DeleteIpamIPInvalid with default headers values -func NewDeleteIpamIPInvalid() *DeleteIpamIPInvalid { - return &DeleteIpamIPInvalid{} -} - -/* -DeleteIpamIPInvalid describes a response with status code 400, with default header values. - -Invalid IP address -*/ -type DeleteIpamIPInvalid struct { -} - -// IsSuccess returns true when this delete ipam Ip invalid response has a 2xx status code -func (o *DeleteIpamIPInvalid) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this delete ipam Ip invalid response has a 3xx status code -func (o *DeleteIpamIPInvalid) IsRedirect() bool { - return false -} - -// IsClientError returns true when this delete ipam Ip invalid response has a 4xx status code -func (o *DeleteIpamIPInvalid) IsClientError() bool { - return true -} - -// IsServerError returns true when this delete ipam Ip invalid response has a 5xx status code -func (o *DeleteIpamIPInvalid) IsServerError() bool { - return false -} - -// IsCode returns true when this delete ipam Ip invalid response a status code equal to that given -func (o *DeleteIpamIPInvalid) IsCode(code int) bool { - return code == 400 -} - -func (o *DeleteIpamIPInvalid) Error() string { - return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpInvalid ", 400) -} - -func (o *DeleteIpamIPInvalid) String() string { - return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpInvalid ", 400) -} - -func (o *DeleteIpamIPInvalid) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} - -// NewDeleteIpamIPNotFound creates a DeleteIpamIPNotFound with default headers values -func NewDeleteIpamIPNotFound() *DeleteIpamIPNotFound { - return &DeleteIpamIPNotFound{} -} - -/* -DeleteIpamIPNotFound describes a response with status code 404, with default header values. 
- -IP address not found -*/ -type DeleteIpamIPNotFound struct { -} - -// IsSuccess returns true when this delete ipam Ip not found response has a 2xx status code -func (o *DeleteIpamIPNotFound) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this delete ipam Ip not found response has a 3xx status code -func (o *DeleteIpamIPNotFound) IsRedirect() bool { - return false -} - -// IsClientError returns true when this delete ipam Ip not found response has a 4xx status code -func (o *DeleteIpamIPNotFound) IsClientError() bool { - return true -} - -// IsServerError returns true when this delete ipam Ip not found response has a 5xx status code -func (o *DeleteIpamIPNotFound) IsServerError() bool { - return false -} - -// IsCode returns true when this delete ipam Ip not found response a status code equal to that given -func (o *DeleteIpamIPNotFound) IsCode(code int) bool { - return code == 404 -} - -func (o *DeleteIpamIPNotFound) Error() string { - return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpNotFound ", 404) -} - -func (o *DeleteIpamIPNotFound) String() string { - return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpNotFound ", 404) -} - -func (o *DeleteIpamIPNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} - -// NewDeleteIpamIPFailure creates a DeleteIpamIPFailure with default headers values -func NewDeleteIpamIPFailure() *DeleteIpamIPFailure { - return &DeleteIpamIPFailure{} -} - -/* -DeleteIpamIPFailure describes a response with status code 500, with default header values. - -Address release failure -*/ -type DeleteIpamIPFailure struct { - Payload models.Error -} - -// IsSuccess returns true when this delete ipam Ip failure response has a 2xx status code -func (o *DeleteIpamIPFailure) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this delete ipam Ip failure response has a 3xx status code -func (o *DeleteIpamIPFailure) IsRedirect() bool { - return false -} - -// IsClientError returns true when this delete ipam Ip failure response has a 4xx status code -func (o *DeleteIpamIPFailure) IsClientError() bool { - return false -} - -// IsServerError returns true when this delete ipam Ip failure response has a 5xx status code -func (o *DeleteIpamIPFailure) IsServerError() bool { - return true -} - -// IsCode returns true when this delete ipam Ip failure response a status code equal to that given -func (o *DeleteIpamIPFailure) IsCode(code int) bool { - return code == 500 -} - -func (o *DeleteIpamIPFailure) Error() string { - return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpFailure %+v", 500, o.Payload) -} - -func (o *DeleteIpamIPFailure) String() string { - return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpFailure %+v", 500, o.Payload) -} - -func (o *DeleteIpamIPFailure) GetPayload() models.Error { - return o.Payload -} - -func (o *DeleteIpamIPFailure) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewDeleteIpamIPDisabled creates a DeleteIpamIPDisabled with default headers values -func NewDeleteIpamIPDisabled() *DeleteIpamIPDisabled { - return &DeleteIpamIPDisabled{} -} - -/* -DeleteIpamIPDisabled describes a response with status code 501, with default header values. 
- -Allocation for address family disabled -*/ -type DeleteIpamIPDisabled struct { -} - -// IsSuccess returns true when this delete ipam Ip disabled response has a 2xx status code -func (o *DeleteIpamIPDisabled) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this delete ipam Ip disabled response has a 3xx status code -func (o *DeleteIpamIPDisabled) IsRedirect() bool { - return false -} - -// IsClientError returns true when this delete ipam Ip disabled response has a 4xx status code -func (o *DeleteIpamIPDisabled) IsClientError() bool { - return false -} - -// IsServerError returns true when this delete ipam Ip disabled response has a 5xx status code -func (o *DeleteIpamIPDisabled) IsServerError() bool { - return true -} - -// IsCode returns true when this delete ipam Ip disabled response a status code equal to that given -func (o *DeleteIpamIPDisabled) IsCode(code int) bool { - return code == 501 -} - -func (o *DeleteIpamIPDisabled) Error() string { - return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpDisabled ", 501) -} - -func (o *DeleteIpamIPDisabled) String() string { - return fmt.Sprintf("[DELETE /ipam/{ip}][%d] deleteIpamIpDisabled ", 501) -} - -func (o *DeleteIpamIPDisabled) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/ipam/ipam_client.go b/vendor/github.com/cilium/cilium/api/v1/client/ipam/ipam_client.go deleted file mode 100644 index bff4193d6..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/ipam/ipam_client.go +++ /dev/null @@ -1,162 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package ipam - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" -) - -// New creates a new ipam API client. 
-func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService { - return &Client{transport: transport, formats: formats} -} - -/* -Client for ipam API -*/ -type Client struct { - transport runtime.ClientTransport - formats strfmt.Registry -} - -// ClientOption is the option for Client methods -type ClientOption func(*runtime.ClientOperation) - -// ClientService is the interface for Client methods -type ClientService interface { - DeleteIpamIP(params *DeleteIpamIPParams, opts ...ClientOption) (*DeleteIpamIPOK, error) - - PostIpam(params *PostIpamParams, opts ...ClientOption) (*PostIpamCreated, error) - - PostIpamIP(params *PostIpamIPParams, opts ...ClientOption) (*PostIpamIPOK, error) - - SetTransport(transport runtime.ClientTransport) -} - -/* -DeleteIpamIP releases an allocated IP address -*/ -func (a *Client) DeleteIpamIP(params *DeleteIpamIPParams, opts ...ClientOption) (*DeleteIpamIPOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewDeleteIpamIPParams() - } - op := &runtime.ClientOperation{ - ID: "DeleteIpamIP", - Method: "DELETE", - PathPattern: "/ipam/{ip}", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &DeleteIpamIPReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*DeleteIpamIPOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for DeleteIpamIP: API contract not enforced by server. Client expected to get an error, but got: %T", result) - panic(msg) -} - -/* -PostIpam allocates an IP address -*/ -func (a *Client) PostIpam(params *PostIpamParams, opts ...ClientOption) (*PostIpamCreated, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewPostIpamParams() - } - op := &runtime.ClientOperation{ - ID: "PostIpam", - Method: "POST", - PathPattern: "/ipam", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &PostIpamReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*PostIpamCreated) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for PostIpam: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) - panic(msg) -} - -/* -PostIpamIP allocates an IP address -*/ -func (a *Client) PostIpamIP(params *PostIpamIPParams, opts ...ClientOption) (*PostIpamIPOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewPostIpamIPParams() - } - op := &runtime.ClientOperation{ - ID: "PostIpamIP", - Method: "POST", - PathPattern: "/ipam/{ip}", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &PostIpamIPReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*PostIpamIPOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for PostIpamIP: API contract not enforced by server. Client expected to get an error, but got: %T", result) - panic(msg) -} - -// SetTransport changes the transport on the client -func (a *Client) SetTransport(transport runtime.ClientTransport) { - a.transport = transport -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/ipam/post_ipam_ip_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/ipam/post_ipam_ip_parameters.go deleted file mode 100644 index 6dbf02985..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/ipam/post_ipam_ip_parameters.go +++ /dev/null @@ -1,216 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package ipam - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" -) - -// NewPostIpamIPParams creates a new PostIpamIPParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewPostIpamIPParams() *PostIpamIPParams { - return &PostIpamIPParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewPostIpamIPParamsWithTimeout creates a new PostIpamIPParams object -// with the ability to set a timeout on a request. -func NewPostIpamIPParamsWithTimeout(timeout time.Duration) *PostIpamIPParams { - return &PostIpamIPParams{ - timeout: timeout, - } -} - -// NewPostIpamIPParamsWithContext creates a new PostIpamIPParams object -// with the ability to set a context for a request. -func NewPostIpamIPParamsWithContext(ctx context.Context) *PostIpamIPParams { - return &PostIpamIPParams{ - Context: ctx, - } -} - -// NewPostIpamIPParamsWithHTTPClient creates a new PostIpamIPParams object -// with the ability to set a custom HTTPClient for a request. -func NewPostIpamIPParamsWithHTTPClient(client *http.Client) *PostIpamIPParams { - return &PostIpamIPParams{ - HTTPClient: client, - } -} - -/* -PostIpamIPParams contains all the parameters to send to the API endpoint - - for the post ipam IP operation. - - Typically these are written to a http.Request. 
-*/ -type PostIpamIPParams struct { - - /* IP. - - IP address - */ - IP string - - // Owner. - Owner *string - - // Pool. - Pool *string - - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the post ipam IP params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *PostIpamIPParams) WithDefaults() *PostIpamIPParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the post ipam IP params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *PostIpamIPParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the post ipam IP params -func (o *PostIpamIPParams) WithTimeout(timeout time.Duration) *PostIpamIPParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the post ipam IP params -func (o *PostIpamIPParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the post ipam IP params -func (o *PostIpamIPParams) WithContext(ctx context.Context) *PostIpamIPParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the post ipam IP params -func (o *PostIpamIPParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the post ipam IP params -func (o *PostIpamIPParams) WithHTTPClient(client *http.Client) *PostIpamIPParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the post ipam IP params -func (o *PostIpamIPParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WithIP adds the ip to the post ipam IP params -func (o *PostIpamIPParams) WithIP(ip string) *PostIpamIPParams { - o.SetIP(ip) - return o -} - -// SetIP adds the ip to the post ipam IP params -func (o *PostIpamIPParams) SetIP(ip string) { - o.IP = ip -} - -// WithOwner adds the owner to the post ipam IP params -func (o *PostIpamIPParams) WithOwner(owner *string) *PostIpamIPParams { - o.SetOwner(owner) - return o -} - -// SetOwner adds the owner to the post ipam IP params -func (o *PostIpamIPParams) SetOwner(owner *string) { - o.Owner = owner -} - -// WithPool adds the pool to the post ipam IP params -func (o *PostIpamIPParams) WithPool(pool *string) *PostIpamIPParams { - o.SetPool(pool) - return o -} - -// SetPool adds the pool to the post ipam IP params -func (o *PostIpamIPParams) SetPool(pool *string) { - o.Pool = pool -} - -// WriteToRequest writes these params to a swagger request -func (o *PostIpamIPParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - - // path param ip - if err := r.SetPathParam("ip", o.IP); err != nil { - return err - } - - if o.Owner != nil { - - // query param owner - var qrOwner string - - if o.Owner != nil { - qrOwner = *o.Owner - } - qOwner := qrOwner - if qOwner != "" { - - if err := r.SetQueryParam("owner", qOwner); err != nil { - return err - } - } - } - - if o.Pool != nil { - - // query param pool - var qrPool string - - if o.Pool != nil { - qrPool = *o.Pool - } - qPool := qrPool - if qPool != "" { - - if err := r.SetQueryParam("pool", qPool); err != nil { - return err - } - } - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/ipam/post_ipam_ip_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/ipam/post_ipam_ip_responses.go deleted file mode 100644 index 746965bb5..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/ipam/post_ipam_ip_responses.go +++ /dev/null @@ -1,327 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package ipam - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// PostIpamIPReader is a Reader for the PostIpamIP structure. -type PostIpamIPReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. -func (o *PostIpamIPReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewPostIpamIPOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 400: - result := NewPostIpamIPInvalid() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 409: - result := NewPostIpamIPExists() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 500: - result := NewPostIpamIPFailure() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 501: - result := NewPostIpamIPDisabled() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewPostIpamIPOK creates a PostIpamIPOK with default headers values -func NewPostIpamIPOK() *PostIpamIPOK { - return &PostIpamIPOK{} -} - -/* -PostIpamIPOK describes a response with status code 200, with default header values. 
- -Success -*/ -type PostIpamIPOK struct { -} - -// IsSuccess returns true when this post ipam Ip o k response has a 2xx status code -func (o *PostIpamIPOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this post ipam Ip o k response has a 3xx status code -func (o *PostIpamIPOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this post ipam Ip o k response has a 4xx status code -func (o *PostIpamIPOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this post ipam Ip o k response has a 5xx status code -func (o *PostIpamIPOK) IsServerError() bool { - return false -} - -// IsCode returns true when this post ipam Ip o k response a status code equal to that given -func (o *PostIpamIPOK) IsCode(code int) bool { - return code == 200 -} - -func (o *PostIpamIPOK) Error() string { - return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpOK ", 200) -} - -func (o *PostIpamIPOK) String() string { - return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpOK ", 200) -} - -func (o *PostIpamIPOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} - -// NewPostIpamIPInvalid creates a PostIpamIPInvalid with default headers values -func NewPostIpamIPInvalid() *PostIpamIPInvalid { - return &PostIpamIPInvalid{} -} - -/* -PostIpamIPInvalid describes a response with status code 400, with default header values. - -Invalid IP address -*/ -type PostIpamIPInvalid struct { -} - -// IsSuccess returns true when this post ipam Ip invalid response has a 2xx status code -func (o *PostIpamIPInvalid) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this post ipam Ip invalid response has a 3xx status code -func (o *PostIpamIPInvalid) IsRedirect() bool { - return false -} - -// IsClientError returns true when this post ipam Ip invalid response has a 4xx status code -func (o *PostIpamIPInvalid) IsClientError() bool { - return true -} - -// IsServerError returns true when this post ipam Ip invalid response has a 5xx status code -func (o *PostIpamIPInvalid) IsServerError() bool { - return false -} - -// IsCode returns true when this post ipam Ip invalid response a status code equal to that given -func (o *PostIpamIPInvalid) IsCode(code int) bool { - return code == 400 -} - -func (o *PostIpamIPInvalid) Error() string { - return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpInvalid ", 400) -} - -func (o *PostIpamIPInvalid) String() string { - return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpInvalid ", 400) -} - -func (o *PostIpamIPInvalid) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} - -// NewPostIpamIPExists creates a PostIpamIPExists with default headers values -func NewPostIpamIPExists() *PostIpamIPExists { - return &PostIpamIPExists{} -} - -/* -PostIpamIPExists describes a response with status code 409, with default header values. 
- -IP already allocated -*/ -type PostIpamIPExists struct { -} - -// IsSuccess returns true when this post ipam Ip exists response has a 2xx status code -func (o *PostIpamIPExists) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this post ipam Ip exists response has a 3xx status code -func (o *PostIpamIPExists) IsRedirect() bool { - return false -} - -// IsClientError returns true when this post ipam Ip exists response has a 4xx status code -func (o *PostIpamIPExists) IsClientError() bool { - return true -} - -// IsServerError returns true when this post ipam Ip exists response has a 5xx status code -func (o *PostIpamIPExists) IsServerError() bool { - return false -} - -// IsCode returns true when this post ipam Ip exists response a status code equal to that given -func (o *PostIpamIPExists) IsCode(code int) bool { - return code == 409 -} - -func (o *PostIpamIPExists) Error() string { - return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpExists ", 409) -} - -func (o *PostIpamIPExists) String() string { - return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpExists ", 409) -} - -func (o *PostIpamIPExists) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} - -// NewPostIpamIPFailure creates a PostIpamIPFailure with default headers values -func NewPostIpamIPFailure() *PostIpamIPFailure { - return &PostIpamIPFailure{} -} - -/* -PostIpamIPFailure describes a response with status code 500, with default header values. - -IP allocation failure. Details in message. -*/ -type PostIpamIPFailure struct { - Payload models.Error -} - -// IsSuccess returns true when this post ipam Ip failure response has a 2xx status code -func (o *PostIpamIPFailure) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this post ipam Ip failure response has a 3xx status code -func (o *PostIpamIPFailure) IsRedirect() bool { - return false -} - -// IsClientError returns true when this post ipam Ip failure response has a 4xx status code -func (o *PostIpamIPFailure) IsClientError() bool { - return false -} - -// IsServerError returns true when this post ipam Ip failure response has a 5xx status code -func (o *PostIpamIPFailure) IsServerError() bool { - return true -} - -// IsCode returns true when this post ipam Ip failure response a status code equal to that given -func (o *PostIpamIPFailure) IsCode(code int) bool { - return code == 500 -} - -func (o *PostIpamIPFailure) Error() string { - return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpFailure %+v", 500, o.Payload) -} - -func (o *PostIpamIPFailure) String() string { - return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpFailure %+v", 500, o.Payload) -} - -func (o *PostIpamIPFailure) GetPayload() models.Error { - return o.Payload -} - -func (o *PostIpamIPFailure) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewPostIpamIPDisabled creates a PostIpamIPDisabled with default headers values -func NewPostIpamIPDisabled() *PostIpamIPDisabled { - return &PostIpamIPDisabled{} -} - -/* -PostIpamIPDisabled describes a response with status code 501, with default header values. 
- -Allocation for address family disabled -*/ -type PostIpamIPDisabled struct { -} - -// IsSuccess returns true when this post ipam Ip disabled response has a 2xx status code -func (o *PostIpamIPDisabled) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this post ipam Ip disabled response has a 3xx status code -func (o *PostIpamIPDisabled) IsRedirect() bool { - return false -} - -// IsClientError returns true when this post ipam Ip disabled response has a 4xx status code -func (o *PostIpamIPDisabled) IsClientError() bool { - return false -} - -// IsServerError returns true when this post ipam Ip disabled response has a 5xx status code -func (o *PostIpamIPDisabled) IsServerError() bool { - return true -} - -// IsCode returns true when this post ipam Ip disabled response a status code equal to that given -func (o *PostIpamIPDisabled) IsCode(code int) bool { - return code == 501 -} - -func (o *PostIpamIPDisabled) Error() string { - return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpDisabled ", 501) -} - -func (o *PostIpamIPDisabled) String() string { - return fmt.Sprintf("[POST /ipam/{ip}][%d] postIpamIpDisabled ", 501) -} - -func (o *PostIpamIPDisabled) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/ipam/post_ipam_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/ipam/post_ipam_parameters.go deleted file mode 100644 index 8e2075849..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/ipam/post_ipam_parameters.go +++ /dev/null @@ -1,248 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package ipam - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" -) - -// NewPostIpamParams creates a new PostIpamParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewPostIpamParams() *PostIpamParams { - return &PostIpamParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewPostIpamParamsWithTimeout creates a new PostIpamParams object -// with the ability to set a timeout on a request. -func NewPostIpamParamsWithTimeout(timeout time.Duration) *PostIpamParams { - return &PostIpamParams{ - timeout: timeout, - } -} - -// NewPostIpamParamsWithContext creates a new PostIpamParams object -// with the ability to set a context for a request. -func NewPostIpamParamsWithContext(ctx context.Context) *PostIpamParams { - return &PostIpamParams{ - Context: ctx, - } -} - -// NewPostIpamParamsWithHTTPClient creates a new PostIpamParams object -// with the ability to set a custom HTTPClient for a request. -func NewPostIpamParamsWithHTTPClient(client *http.Client) *PostIpamParams { - return &PostIpamParams{ - HTTPClient: client, - } -} - -/* -PostIpamParams contains all the parameters to send to the API endpoint - - for the post ipam operation. - - Typically these are written to a http.Request. -*/ -type PostIpamParams struct { - - // Expiration. 
- Expiration *bool - - // Family. - Family *string - - // Owner. - Owner *string - - // Pool. - Pool *string - - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the post ipam params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *PostIpamParams) WithDefaults() *PostIpamParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the post ipam params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *PostIpamParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the post ipam params -func (o *PostIpamParams) WithTimeout(timeout time.Duration) *PostIpamParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the post ipam params -func (o *PostIpamParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the post ipam params -func (o *PostIpamParams) WithContext(ctx context.Context) *PostIpamParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the post ipam params -func (o *PostIpamParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the post ipam params -func (o *PostIpamParams) WithHTTPClient(client *http.Client) *PostIpamParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the post ipam params -func (o *PostIpamParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WithExpiration adds the expiration to the post ipam params -func (o *PostIpamParams) WithExpiration(expiration *bool) *PostIpamParams { - o.SetExpiration(expiration) - return o -} - -// SetExpiration adds the expiration to the post ipam params -func (o *PostIpamParams) SetExpiration(expiration *bool) { - o.Expiration = expiration -} - -// WithFamily adds the family to the post ipam params -func (o *PostIpamParams) WithFamily(family *string) *PostIpamParams { - o.SetFamily(family) - return o -} - -// SetFamily adds the family to the post ipam params -func (o *PostIpamParams) SetFamily(family *string) { - o.Family = family -} - -// WithOwner adds the owner to the post ipam params -func (o *PostIpamParams) WithOwner(owner *string) *PostIpamParams { - o.SetOwner(owner) - return o -} - -// SetOwner adds the owner to the post ipam params -func (o *PostIpamParams) SetOwner(owner *string) { - o.Owner = owner -} - -// WithPool adds the pool to the post ipam params -func (o *PostIpamParams) WithPool(pool *string) *PostIpamParams { - o.SetPool(pool) - return o -} - -// SetPool adds the pool to the post ipam params -func (o *PostIpamParams) SetPool(pool *string) { - o.Pool = pool -} - -// WriteToRequest writes these params to a swagger request -func (o *PostIpamParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - - if o.Expiration != nil { - - // header param expiration - if err := r.SetHeaderParam("expiration", swag.FormatBool(*o.Expiration)); err != nil { - return err - } - } - - if o.Family != nil { - - // query param family - var qrFamily string - - if o.Family != nil { - qrFamily = *o.Family - } - qFamily := qrFamily - if qFamily != "" { - - if err := r.SetQueryParam("family", qFamily); err != nil { - return err - } - } - } - - if o.Owner != nil { - - // 
query param owner - var qrOwner string - - if o.Owner != nil { - qrOwner = *o.Owner - } - qOwner := qrOwner - if qOwner != "" { - - if err := r.SetQueryParam("owner", qOwner); err != nil { - return err - } - } - } - - if o.Pool != nil { - - // query param pool - var qrPool string - - if o.Pool != nil { - qrPool = *o.Pool - } - qPool := qrPool - if qPool != "" { - - if err := r.SetQueryParam("pool", qPool); err != nil { - return err - } - } - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/ipam/post_ipam_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/ipam/post_ipam_responses.go deleted file mode 100644 index 17ff143c8..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/ipam/post_ipam_responses.go +++ /dev/null @@ -1,168 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package ipam - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// PostIpamReader is a Reader for the PostIpam structure. -type PostIpamReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. -func (o *PostIpamReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 201: - result := NewPostIpamCreated() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 502: - result := NewPostIpamFailure() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewPostIpamCreated creates a PostIpamCreated with default headers values -func NewPostIpamCreated() *PostIpamCreated { - return &PostIpamCreated{} -} - -/* -PostIpamCreated describes a response with status code 201, with default header values. 
- -Success -*/ -type PostIpamCreated struct { - Payload *models.IPAMResponse -} - -// IsSuccess returns true when this post ipam created response has a 2xx status code -func (o *PostIpamCreated) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this post ipam created response has a 3xx status code -func (o *PostIpamCreated) IsRedirect() bool { - return false -} - -// IsClientError returns true when this post ipam created response has a 4xx status code -func (o *PostIpamCreated) IsClientError() bool { - return false -} - -// IsServerError returns true when this post ipam created response has a 5xx status code -func (o *PostIpamCreated) IsServerError() bool { - return false -} - -// IsCode returns true when this post ipam created response a status code equal to that given -func (o *PostIpamCreated) IsCode(code int) bool { - return code == 201 -} - -func (o *PostIpamCreated) Error() string { - return fmt.Sprintf("[POST /ipam][%d] postIpamCreated %+v", 201, o.Payload) -} - -func (o *PostIpamCreated) String() string { - return fmt.Sprintf("[POST /ipam][%d] postIpamCreated %+v", 201, o.Payload) -} - -func (o *PostIpamCreated) GetPayload() *models.IPAMResponse { - return o.Payload -} - -func (o *PostIpamCreated) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - o.Payload = new(models.IPAMResponse) - - // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewPostIpamFailure creates a PostIpamFailure with default headers values -func NewPostIpamFailure() *PostIpamFailure { - return &PostIpamFailure{} -} - -/* -PostIpamFailure describes a response with status code 502, with default header values. 
- -Allocation failure -*/ -type PostIpamFailure struct { - Payload models.Error -} - -// IsSuccess returns true when this post ipam failure response has a 2xx status code -func (o *PostIpamFailure) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this post ipam failure response has a 3xx status code -func (o *PostIpamFailure) IsRedirect() bool { - return false -} - -// IsClientError returns true when this post ipam failure response has a 4xx status code -func (o *PostIpamFailure) IsClientError() bool { - return false -} - -// IsServerError returns true when this post ipam failure response has a 5xx status code -func (o *PostIpamFailure) IsServerError() bool { - return true -} - -// IsCode returns true when this post ipam failure response a status code equal to that given -func (o *PostIpamFailure) IsCode(code int) bool { - return code == 502 -} - -func (o *PostIpamFailure) Error() string { - return fmt.Sprintf("[POST /ipam][%d] postIpamFailure %+v", 502, o.Payload) -} - -func (o *PostIpamFailure) String() string { - return fmt.Sprintf("[POST /ipam][%d] postIpamFailure %+v", 502, o.Payload) -} - -func (o *PostIpamFailure) GetPayload() models.Error { - return o.Payload -} - -func (o *PostIpamFailure) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/metrics/get_metrics_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/metrics/get_metrics_parameters.go deleted file mode 100644 index 7dd482af5..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/metrics/get_metrics_parameters.go +++ /dev/null @@ -1,131 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package metrics - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" -) - -// NewGetMetricsParams creates a new GetMetricsParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewGetMetricsParams() *GetMetricsParams { - return &GetMetricsParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewGetMetricsParamsWithTimeout creates a new GetMetricsParams object -// with the ability to set a timeout on a request. -func NewGetMetricsParamsWithTimeout(timeout time.Duration) *GetMetricsParams { - return &GetMetricsParams{ - timeout: timeout, - } -} - -// NewGetMetricsParamsWithContext creates a new GetMetricsParams object -// with the ability to set a context for a request. -func NewGetMetricsParamsWithContext(ctx context.Context) *GetMetricsParams { - return &GetMetricsParams{ - Context: ctx, - } -} - -// NewGetMetricsParamsWithHTTPClient creates a new GetMetricsParams object -// with the ability to set a custom HTTPClient for a request. 
-func NewGetMetricsParamsWithHTTPClient(client *http.Client) *GetMetricsParams { - return &GetMetricsParams{ - HTTPClient: client, - } -} - -/* -GetMetricsParams contains all the parameters to send to the API endpoint - - for the get metrics operation. - - Typically these are written to a http.Request. -*/ -type GetMetricsParams struct { - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the get metrics params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetMetricsParams) WithDefaults() *GetMetricsParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the get metrics params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetMetricsParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the get metrics params -func (o *GetMetricsParams) WithTimeout(timeout time.Duration) *GetMetricsParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the get metrics params -func (o *GetMetricsParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the get metrics params -func (o *GetMetricsParams) WithContext(ctx context.Context) *GetMetricsParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the get metrics params -func (o *GetMetricsParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the get metrics params -func (o *GetMetricsParams) WithHTTPClient(client *http.Client) *GetMetricsParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the get metrics params -func (o *GetMetricsParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WriteToRequest writes these params to a swagger request -func (o *GetMetricsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/metrics/get_metrics_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/metrics/get_metrics_responses.go deleted file mode 100644 index 766782c92..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/metrics/get_metrics_responses.go +++ /dev/null @@ -1,156 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package metrics - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// GetMetricsReader is a Reader for the GetMetrics structure. -type GetMetricsReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. 
-func (o *GetMetricsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewGetMetricsOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 500: - result := NewGetMetricsInternalServerError() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewGetMetricsOK creates a GetMetricsOK with default headers values -func NewGetMetricsOK() *GetMetricsOK { - return &GetMetricsOK{} -} - -/* -GetMetricsOK describes a response with status code 200, with default header values. - -Success -*/ -type GetMetricsOK struct { - Payload []*models.Metric -} - -// IsSuccess returns true when this get metrics o k response has a 2xx status code -func (o *GetMetricsOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this get metrics o k response has a 3xx status code -func (o *GetMetricsOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get metrics o k response has a 4xx status code -func (o *GetMetricsOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this get metrics o k response has a 5xx status code -func (o *GetMetricsOK) IsServerError() bool { - return false -} - -// IsCode returns true when this get metrics o k response a status code equal to that given -func (o *GetMetricsOK) IsCode(code int) bool { - return code == 200 -} - -func (o *GetMetricsOK) Error() string { - return fmt.Sprintf("[GET /metrics/][%d] getMetricsOK %+v", 200, o.Payload) -} - -func (o *GetMetricsOK) String() string { - return fmt.Sprintf("[GET /metrics/][%d] getMetricsOK %+v", 200, o.Payload) -} - -func (o *GetMetricsOK) GetPayload() []*models.Metric { - return o.Payload -} - -func (o *GetMetricsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewGetMetricsInternalServerError creates a GetMetricsInternalServerError with default headers values -func NewGetMetricsInternalServerError() *GetMetricsInternalServerError { - return &GetMetricsInternalServerError{} -} - -/* -GetMetricsInternalServerError describes a response with status code 500, with default header values. 
- -Metrics cannot be retrieved -*/ -type GetMetricsInternalServerError struct { -} - -// IsSuccess returns true when this get metrics internal server error response has a 2xx status code -func (o *GetMetricsInternalServerError) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this get metrics internal server error response has a 3xx status code -func (o *GetMetricsInternalServerError) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get metrics internal server error response has a 4xx status code -func (o *GetMetricsInternalServerError) IsClientError() bool { - return false -} - -// IsServerError returns true when this get metrics internal server error response has a 5xx status code -func (o *GetMetricsInternalServerError) IsServerError() bool { - return true -} - -// IsCode returns true when this get metrics internal server error response a status code equal to that given -func (o *GetMetricsInternalServerError) IsCode(code int) bool { - return code == 500 -} - -func (o *GetMetricsInternalServerError) Error() string { - return fmt.Sprintf("[GET /metrics/][%d] getMetricsInternalServerError ", 500) -} - -func (o *GetMetricsInternalServerError) String() string { - return fmt.Sprintf("[GET /metrics/][%d] getMetricsInternalServerError ", 500) -} - -func (o *GetMetricsInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/metrics/metrics_client.go b/vendor/github.com/cilium/cilium/api/v1/client/metrics/metrics_client.go deleted file mode 100644 index d51858ef4..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/metrics/metrics_client.go +++ /dev/null @@ -1,82 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package metrics - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" -) - -// New creates a new metrics API client. 
-func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService { - return &Client{transport: transport, formats: formats} -} - -/* -Client for metrics API -*/ -type Client struct { - transport runtime.ClientTransport - formats strfmt.Registry -} - -// ClientOption is the option for Client methods -type ClientOption func(*runtime.ClientOperation) - -// ClientService is the interface for Client methods -type ClientService interface { - GetMetrics(params *GetMetricsParams, opts ...ClientOption) (*GetMetricsOK, error) - - SetTransport(transport runtime.ClientTransport) -} - -/* -GetMetrics retrieves cilium metrics -*/ -func (a *Client) GetMetrics(params *GetMetricsParams, opts ...ClientOption) (*GetMetricsOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewGetMetricsParams() - } - op := &runtime.ClientOperation{ - ID: "GetMetrics", - Method: "GET", - PathPattern: "/metrics/", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &GetMetricsReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*GetMetricsOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for GetMetrics: API contract not enforced by server. Client expected to get an error, but got: %T", result) - panic(msg) -} - -// SetTransport changes the transport on the client -func (a *Client) SetTransport(transport runtime.ClientTransport) { - a.transport = transport -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/delete_fqdn_cache_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/delete_fqdn_cache_parameters.go deleted file mode 100644 index c53a2f7cf..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/policy/delete_fqdn_cache_parameters.go +++ /dev/null @@ -1,166 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package policy - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" -) - -// NewDeleteFqdnCacheParams creates a new DeleteFqdnCacheParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewDeleteFqdnCacheParams() *DeleteFqdnCacheParams { - return &DeleteFqdnCacheParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewDeleteFqdnCacheParamsWithTimeout creates a new DeleteFqdnCacheParams object -// with the ability to set a timeout on a request. 
-func NewDeleteFqdnCacheParamsWithTimeout(timeout time.Duration) *DeleteFqdnCacheParams { - return &DeleteFqdnCacheParams{ - timeout: timeout, - } -} - -// NewDeleteFqdnCacheParamsWithContext creates a new DeleteFqdnCacheParams object -// with the ability to set a context for a request. -func NewDeleteFqdnCacheParamsWithContext(ctx context.Context) *DeleteFqdnCacheParams { - return &DeleteFqdnCacheParams{ - Context: ctx, - } -} - -// NewDeleteFqdnCacheParamsWithHTTPClient creates a new DeleteFqdnCacheParams object -// with the ability to set a custom HTTPClient for a request. -func NewDeleteFqdnCacheParamsWithHTTPClient(client *http.Client) *DeleteFqdnCacheParams { - return &DeleteFqdnCacheParams{ - HTTPClient: client, - } -} - -/* -DeleteFqdnCacheParams contains all the parameters to send to the API endpoint - - for the delete fqdn cache operation. - - Typically these are written to a http.Request. -*/ -type DeleteFqdnCacheParams struct { - - /* Matchpattern. - - A toFQDNs compatible matchPattern expression - */ - Matchpattern *string - - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the delete fqdn cache params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *DeleteFqdnCacheParams) WithDefaults() *DeleteFqdnCacheParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the delete fqdn cache params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *DeleteFqdnCacheParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the delete fqdn cache params -func (o *DeleteFqdnCacheParams) WithTimeout(timeout time.Duration) *DeleteFqdnCacheParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the delete fqdn cache params -func (o *DeleteFqdnCacheParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the delete fqdn cache params -func (o *DeleteFqdnCacheParams) WithContext(ctx context.Context) *DeleteFqdnCacheParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the delete fqdn cache params -func (o *DeleteFqdnCacheParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the delete fqdn cache params -func (o *DeleteFqdnCacheParams) WithHTTPClient(client *http.Client) *DeleteFqdnCacheParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the delete fqdn cache params -func (o *DeleteFqdnCacheParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WithMatchpattern adds the matchpattern to the delete fqdn cache params -func (o *DeleteFqdnCacheParams) WithMatchpattern(matchpattern *string) *DeleteFqdnCacheParams { - o.SetMatchpattern(matchpattern) - return o -} - -// SetMatchpattern adds the matchpattern to the delete fqdn cache params -func (o *DeleteFqdnCacheParams) SetMatchpattern(matchpattern *string) { - o.Matchpattern = matchpattern -} - -// WriteToRequest writes these params to a swagger request -func (o *DeleteFqdnCacheParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - - if o.Matchpattern != nil { - - // query param matchpattern - var qrMatchpattern string - - if o.Matchpattern != nil { - qrMatchpattern = 
*o.Matchpattern - } - qMatchpattern := qrMatchpattern - if qMatchpattern != "" { - - if err := r.SetQueryParam("matchpattern", qMatchpattern); err != nil { - return err - } - } - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/delete_fqdn_cache_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/delete_fqdn_cache_responses.go deleted file mode 100644 index 1fe001c66..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/policy/delete_fqdn_cache_responses.go +++ /dev/null @@ -1,156 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package policy - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// DeleteFqdnCacheReader is a Reader for the DeleteFqdnCache structure. -type DeleteFqdnCacheReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. -func (o *DeleteFqdnCacheReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewDeleteFqdnCacheOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 400: - result := NewDeleteFqdnCacheBadRequest() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewDeleteFqdnCacheOK creates a DeleteFqdnCacheOK with default headers values -func NewDeleteFqdnCacheOK() *DeleteFqdnCacheOK { - return &DeleteFqdnCacheOK{} -} - -/* -DeleteFqdnCacheOK describes a response with status code 200, with default header values. 
- -Success -*/ -type DeleteFqdnCacheOK struct { -} - -// IsSuccess returns true when this delete fqdn cache o k response has a 2xx status code -func (o *DeleteFqdnCacheOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this delete fqdn cache o k response has a 3xx status code -func (o *DeleteFqdnCacheOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this delete fqdn cache o k response has a 4xx status code -func (o *DeleteFqdnCacheOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this delete fqdn cache o k response has a 5xx status code -func (o *DeleteFqdnCacheOK) IsServerError() bool { - return false -} - -// IsCode returns true when this delete fqdn cache o k response a status code equal to that given -func (o *DeleteFqdnCacheOK) IsCode(code int) bool { - return code == 200 -} - -func (o *DeleteFqdnCacheOK) Error() string { - return fmt.Sprintf("[DELETE /fqdn/cache][%d] deleteFqdnCacheOK ", 200) -} - -func (o *DeleteFqdnCacheOK) String() string { - return fmt.Sprintf("[DELETE /fqdn/cache][%d] deleteFqdnCacheOK ", 200) -} - -func (o *DeleteFqdnCacheOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} - -// NewDeleteFqdnCacheBadRequest creates a DeleteFqdnCacheBadRequest with default headers values -func NewDeleteFqdnCacheBadRequest() *DeleteFqdnCacheBadRequest { - return &DeleteFqdnCacheBadRequest{} -} - -/* -DeleteFqdnCacheBadRequest describes a response with status code 400, with default header values. - -Invalid request (error parsing parameters) -*/ -type DeleteFqdnCacheBadRequest struct { - Payload models.Error -} - -// IsSuccess returns true when this delete fqdn cache bad request response has a 2xx status code -func (o *DeleteFqdnCacheBadRequest) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this delete fqdn cache bad request response has a 3xx status code -func (o *DeleteFqdnCacheBadRequest) IsRedirect() bool { - return false -} - -// IsClientError returns true when this delete fqdn cache bad request response has a 4xx status code -func (o *DeleteFqdnCacheBadRequest) IsClientError() bool { - return true -} - -// IsServerError returns true when this delete fqdn cache bad request response has a 5xx status code -func (o *DeleteFqdnCacheBadRequest) IsServerError() bool { - return false -} - -// IsCode returns true when this delete fqdn cache bad request response a status code equal to that given -func (o *DeleteFqdnCacheBadRequest) IsCode(code int) bool { - return code == 400 -} - -func (o *DeleteFqdnCacheBadRequest) Error() string { - return fmt.Sprintf("[DELETE /fqdn/cache][%d] deleteFqdnCacheBadRequest %+v", 400, o.Payload) -} - -func (o *DeleteFqdnCacheBadRequest) String() string { - return fmt.Sprintf("[DELETE /fqdn/cache][%d] deleteFqdnCacheBadRequest %+v", 400, o.Payload) -} - -func (o *DeleteFqdnCacheBadRequest) GetPayload() models.Error { - return o.Payload -} - -func (o *DeleteFqdnCacheBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/delete_policy_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/delete_policy_parameters.go deleted file mode 100644 index 437ef67af..000000000 --- 
a/vendor/github.com/cilium/cilium/api/v1/client/policy/delete_policy_parameters.go +++ /dev/null @@ -1,153 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package policy - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// NewDeletePolicyParams creates a new DeletePolicyParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewDeletePolicyParams() *DeletePolicyParams { - return &DeletePolicyParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewDeletePolicyParamsWithTimeout creates a new DeletePolicyParams object -// with the ability to set a timeout on a request. -func NewDeletePolicyParamsWithTimeout(timeout time.Duration) *DeletePolicyParams { - return &DeletePolicyParams{ - timeout: timeout, - } -} - -// NewDeletePolicyParamsWithContext creates a new DeletePolicyParams object -// with the ability to set a context for a request. -func NewDeletePolicyParamsWithContext(ctx context.Context) *DeletePolicyParams { - return &DeletePolicyParams{ - Context: ctx, - } -} - -// NewDeletePolicyParamsWithHTTPClient creates a new DeletePolicyParams object -// with the ability to set a custom HTTPClient for a request. -func NewDeletePolicyParamsWithHTTPClient(client *http.Client) *DeletePolicyParams { - return &DeletePolicyParams{ - HTTPClient: client, - } -} - -/* -DeletePolicyParams contains all the parameters to send to the API endpoint - - for the delete policy operation. - - Typically these are written to a http.Request. -*/ -type DeletePolicyParams struct { - - // Labels. - Labels models.Labels - - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the delete policy params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *DeletePolicyParams) WithDefaults() *DeletePolicyParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the delete policy params (not the query body). -// -// All values with no default are reset to their zero value. 
-func (o *DeletePolicyParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the delete policy params -func (o *DeletePolicyParams) WithTimeout(timeout time.Duration) *DeletePolicyParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the delete policy params -func (o *DeletePolicyParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the delete policy params -func (o *DeletePolicyParams) WithContext(ctx context.Context) *DeletePolicyParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the delete policy params -func (o *DeletePolicyParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the delete policy params -func (o *DeletePolicyParams) WithHTTPClient(client *http.Client) *DeletePolicyParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the delete policy params -func (o *DeletePolicyParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WithLabels adds the labels to the delete policy params -func (o *DeletePolicyParams) WithLabels(labels models.Labels) *DeletePolicyParams { - o.SetLabels(labels) - return o -} - -// SetLabels adds the labels to the delete policy params -func (o *DeletePolicyParams) SetLabels(labels models.Labels) { - o.Labels = labels -} - -// WriteToRequest writes these params to a swagger request -func (o *DeletePolicyParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - if o.Labels != nil { - if err := r.SetBodyParam(o.Labels); err != nil { - return err - } - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/delete_policy_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/delete_policy_responses.go deleted file mode 100644 index c36694d32..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/policy/delete_policy_responses.go +++ /dev/null @@ -1,292 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package policy - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// DeletePolicyReader is a Reader for the DeletePolicy structure. -type DeletePolicyReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. 
-func (o *DeletePolicyReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewDeletePolicyOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 400: - result := NewDeletePolicyInvalid() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 404: - result := NewDeletePolicyNotFound() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 500: - result := NewDeletePolicyFailure() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewDeletePolicyOK creates a DeletePolicyOK with default headers values -func NewDeletePolicyOK() *DeletePolicyOK { - return &DeletePolicyOK{} -} - -/* -DeletePolicyOK describes a response with status code 200, with default header values. - -Success -*/ -type DeletePolicyOK struct { - Payload *models.Policy -} - -// IsSuccess returns true when this delete policy o k response has a 2xx status code -func (o *DeletePolicyOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this delete policy o k response has a 3xx status code -func (o *DeletePolicyOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this delete policy o k response has a 4xx status code -func (o *DeletePolicyOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this delete policy o k response has a 5xx status code -func (o *DeletePolicyOK) IsServerError() bool { - return false -} - -// IsCode returns true when this delete policy o k response a status code equal to that given -func (o *DeletePolicyOK) IsCode(code int) bool { - return code == 200 -} - -func (o *DeletePolicyOK) Error() string { - return fmt.Sprintf("[DELETE /policy][%d] deletePolicyOK %+v", 200, o.Payload) -} - -func (o *DeletePolicyOK) String() string { - return fmt.Sprintf("[DELETE /policy][%d] deletePolicyOK %+v", 200, o.Payload) -} - -func (o *DeletePolicyOK) GetPayload() *models.Policy { - return o.Payload -} - -func (o *DeletePolicyOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - o.Payload = new(models.Policy) - - // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewDeletePolicyInvalid creates a DeletePolicyInvalid with default headers values -func NewDeletePolicyInvalid() *DeletePolicyInvalid { - return &DeletePolicyInvalid{} -} - -/* -DeletePolicyInvalid describes a response with status code 400, with default header values. 
- -Invalid request -*/ -type DeletePolicyInvalid struct { - Payload models.Error -} - -// IsSuccess returns true when this delete policy invalid response has a 2xx status code -func (o *DeletePolicyInvalid) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this delete policy invalid response has a 3xx status code -func (o *DeletePolicyInvalid) IsRedirect() bool { - return false -} - -// IsClientError returns true when this delete policy invalid response has a 4xx status code -func (o *DeletePolicyInvalid) IsClientError() bool { - return true -} - -// IsServerError returns true when this delete policy invalid response has a 5xx status code -func (o *DeletePolicyInvalid) IsServerError() bool { - return false -} - -// IsCode returns true when this delete policy invalid response a status code equal to that given -func (o *DeletePolicyInvalid) IsCode(code int) bool { - return code == 400 -} - -func (o *DeletePolicyInvalid) Error() string { - return fmt.Sprintf("[DELETE /policy][%d] deletePolicyInvalid %+v", 400, o.Payload) -} - -func (o *DeletePolicyInvalid) String() string { - return fmt.Sprintf("[DELETE /policy][%d] deletePolicyInvalid %+v", 400, o.Payload) -} - -func (o *DeletePolicyInvalid) GetPayload() models.Error { - return o.Payload -} - -func (o *DeletePolicyInvalid) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewDeletePolicyNotFound creates a DeletePolicyNotFound with default headers values -func NewDeletePolicyNotFound() *DeletePolicyNotFound { - return &DeletePolicyNotFound{} -} - -/* -DeletePolicyNotFound describes a response with status code 404, with default header values. - -Policy not found -*/ -type DeletePolicyNotFound struct { -} - -// IsSuccess returns true when this delete policy not found response has a 2xx status code -func (o *DeletePolicyNotFound) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this delete policy not found response has a 3xx status code -func (o *DeletePolicyNotFound) IsRedirect() bool { - return false -} - -// IsClientError returns true when this delete policy not found response has a 4xx status code -func (o *DeletePolicyNotFound) IsClientError() bool { - return true -} - -// IsServerError returns true when this delete policy not found response has a 5xx status code -func (o *DeletePolicyNotFound) IsServerError() bool { - return false -} - -// IsCode returns true when this delete policy not found response a status code equal to that given -func (o *DeletePolicyNotFound) IsCode(code int) bool { - return code == 404 -} - -func (o *DeletePolicyNotFound) Error() string { - return fmt.Sprintf("[DELETE /policy][%d] deletePolicyNotFound ", 404) -} - -func (o *DeletePolicyNotFound) String() string { - return fmt.Sprintf("[DELETE /policy][%d] deletePolicyNotFound ", 404) -} - -func (o *DeletePolicyNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} - -// NewDeletePolicyFailure creates a DeletePolicyFailure with default headers values -func NewDeletePolicyFailure() *DeletePolicyFailure { - return &DeletePolicyFailure{} -} - -/* -DeletePolicyFailure describes a response with status code 500, with default header values. 
- -Error while deleting policy -*/ -type DeletePolicyFailure struct { - Payload models.Error -} - -// IsSuccess returns true when this delete policy failure response has a 2xx status code -func (o *DeletePolicyFailure) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this delete policy failure response has a 3xx status code -func (o *DeletePolicyFailure) IsRedirect() bool { - return false -} - -// IsClientError returns true when this delete policy failure response has a 4xx status code -func (o *DeletePolicyFailure) IsClientError() bool { - return false -} - -// IsServerError returns true when this delete policy failure response has a 5xx status code -func (o *DeletePolicyFailure) IsServerError() bool { - return true -} - -// IsCode returns true when this delete policy failure response a status code equal to that given -func (o *DeletePolicyFailure) IsCode(code int) bool { - return code == 500 -} - -func (o *DeletePolicyFailure) Error() string { - return fmt.Sprintf("[DELETE /policy][%d] deletePolicyFailure %+v", 500, o.Payload) -} - -func (o *DeletePolicyFailure) String() string { - return fmt.Sprintf("[DELETE /policy][%d] deletePolicyFailure %+v", 500, o.Payload) -} - -func (o *DeletePolicyFailure) GetPayload() models.Error { - return o.Payload -} - -func (o *DeletePolicyFailure) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_cache_id_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_cache_id_parameters.go deleted file mode 100644 index ea3b520e5..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_cache_id_parameters.go +++ /dev/null @@ -1,268 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package policy - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" -) - -// NewGetFqdnCacheIDParams creates a new GetFqdnCacheIDParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewGetFqdnCacheIDParams() *GetFqdnCacheIDParams { - return &GetFqdnCacheIDParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewGetFqdnCacheIDParamsWithTimeout creates a new GetFqdnCacheIDParams object -// with the ability to set a timeout on a request. -func NewGetFqdnCacheIDParamsWithTimeout(timeout time.Duration) *GetFqdnCacheIDParams { - return &GetFqdnCacheIDParams{ - timeout: timeout, - } -} - -// NewGetFqdnCacheIDParamsWithContext creates a new GetFqdnCacheIDParams object -// with the ability to set a context for a request. -func NewGetFqdnCacheIDParamsWithContext(ctx context.Context) *GetFqdnCacheIDParams { - return &GetFqdnCacheIDParams{ - Context: ctx, - } -} - -// NewGetFqdnCacheIDParamsWithHTTPClient creates a new GetFqdnCacheIDParams object -// with the ability to set a custom HTTPClient for a request. 
-func NewGetFqdnCacheIDParamsWithHTTPClient(client *http.Client) *GetFqdnCacheIDParams { - return &GetFqdnCacheIDParams{ - HTTPClient: client, - } -} - -/* -GetFqdnCacheIDParams contains all the parameters to send to the API endpoint - - for the get fqdn cache ID operation. - - Typically these are written to a http.Request. -*/ -type GetFqdnCacheIDParams struct { - - /* Cidr. - - A CIDR range of IPs - */ - Cidr *string - - /* ID. - - String describing an endpoint with the format ``[prefix:]id``. If no prefix - is specified, a prefix of ``cilium-local:`` is assumed. Not all endpoints - will be addressable by all endpoint ID prefixes with the exception of the - local Cilium UUID which is assigned to all endpoints. - - Supported endpoint id prefixes: - - cilium-local: Local Cilium endpoint UUID, e.g. cilium-local:3389595 - - cilium-global: Global Cilium endpoint UUID, e.g. cilium-global:cluster1:nodeX:452343 - - container-id: Container runtime ID, e.g. container-id:22222 - - container-name: Container name, e.g. container-name:foobar - - pod-name: pod name for this container if K8s is enabled, e.g. pod-name:default:foobar - - docker-endpoint: Docker libnetwork endpoint ID, e.g. docker-endpoint:4444 - - */ - ID string - - /* Matchpattern. - - A toFQDNs compatible matchPattern expression - */ - Matchpattern *string - - /* Source. - - Source from which FQDN entries come from - */ - Source *string - - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the get fqdn cache ID params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetFqdnCacheIDParams) WithDefaults() *GetFqdnCacheIDParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the get fqdn cache ID params (not the query body). -// -// All values with no default are reset to their zero value. 
-func (o *GetFqdnCacheIDParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the get fqdn cache ID params -func (o *GetFqdnCacheIDParams) WithTimeout(timeout time.Duration) *GetFqdnCacheIDParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the get fqdn cache ID params -func (o *GetFqdnCacheIDParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the get fqdn cache ID params -func (o *GetFqdnCacheIDParams) WithContext(ctx context.Context) *GetFqdnCacheIDParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the get fqdn cache ID params -func (o *GetFqdnCacheIDParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the get fqdn cache ID params -func (o *GetFqdnCacheIDParams) WithHTTPClient(client *http.Client) *GetFqdnCacheIDParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the get fqdn cache ID params -func (o *GetFqdnCacheIDParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WithCidr adds the cidr to the get fqdn cache ID params -func (o *GetFqdnCacheIDParams) WithCidr(cidr *string) *GetFqdnCacheIDParams { - o.SetCidr(cidr) - return o -} - -// SetCidr adds the cidr to the get fqdn cache ID params -func (o *GetFqdnCacheIDParams) SetCidr(cidr *string) { - o.Cidr = cidr -} - -// WithID adds the id to the get fqdn cache ID params -func (o *GetFqdnCacheIDParams) WithID(id string) *GetFqdnCacheIDParams { - o.SetID(id) - return o -} - -// SetID adds the id to the get fqdn cache ID params -func (o *GetFqdnCacheIDParams) SetID(id string) { - o.ID = id -} - -// WithMatchpattern adds the matchpattern to the get fqdn cache ID params -func (o *GetFqdnCacheIDParams) WithMatchpattern(matchpattern *string) *GetFqdnCacheIDParams { - o.SetMatchpattern(matchpattern) - return o -} - -// SetMatchpattern adds the matchpattern to the get fqdn cache ID params -func (o *GetFqdnCacheIDParams) SetMatchpattern(matchpattern *string) { - o.Matchpattern = matchpattern -} - -// WithSource adds the source to the get fqdn cache ID params -func (o *GetFqdnCacheIDParams) WithSource(source *string) *GetFqdnCacheIDParams { - o.SetSource(source) - return o -} - -// SetSource adds the source to the get fqdn cache ID params -func (o *GetFqdnCacheIDParams) SetSource(source *string) { - o.Source = source -} - -// WriteToRequest writes these params to a swagger request -func (o *GetFqdnCacheIDParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - - if o.Cidr != nil { - - // query param cidr - var qrCidr string - - if o.Cidr != nil { - qrCidr = *o.Cidr - } - qCidr := qrCidr - if qCidr != "" { - - if err := r.SetQueryParam("cidr", qCidr); err != nil { - return err - } - } - } - - // path param id - if err := r.SetPathParam("id", o.ID); err != nil { - return err - } - - if o.Matchpattern != nil { - - // query param matchpattern - var qrMatchpattern string - - if o.Matchpattern != nil { - qrMatchpattern = *o.Matchpattern - } - qMatchpattern := qrMatchpattern - if qMatchpattern != "" { - - if err := r.SetQueryParam("matchpattern", qMatchpattern); err != nil { - return err - } - } - } - - if o.Source != nil { - - // query param source - var qrSource string - - if o.Source != nil { - qrSource = *o.Source - } - qSource := qrSource - if qSource != 
"" { - - if err := r.SetQueryParam("source", qSource); err != nil { - return err - } - } - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_cache_id_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_cache_id_responses.go deleted file mode 100644 index 7d4297523..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_cache_id_responses.go +++ /dev/null @@ -1,223 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package policy - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// GetFqdnCacheIDReader is a Reader for the GetFqdnCacheID structure. -type GetFqdnCacheIDReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. -func (o *GetFqdnCacheIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewGetFqdnCacheIDOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 400: - result := NewGetFqdnCacheIDBadRequest() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 404: - result := NewGetFqdnCacheIDNotFound() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewGetFqdnCacheIDOK creates a GetFqdnCacheIDOK with default headers values -func NewGetFqdnCacheIDOK() *GetFqdnCacheIDOK { - return &GetFqdnCacheIDOK{} -} - -/* -GetFqdnCacheIDOK describes a response with status code 200, with default header values. 
- -Success -*/ -type GetFqdnCacheIDOK struct { - Payload []*models.DNSLookup -} - -// IsSuccess returns true when this get fqdn cache Id o k response has a 2xx status code -func (o *GetFqdnCacheIDOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this get fqdn cache Id o k response has a 3xx status code -func (o *GetFqdnCacheIDOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get fqdn cache Id o k response has a 4xx status code -func (o *GetFqdnCacheIDOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this get fqdn cache Id o k response has a 5xx status code -func (o *GetFqdnCacheIDOK) IsServerError() bool { - return false -} - -// IsCode returns true when this get fqdn cache Id o k response a status code equal to that given -func (o *GetFqdnCacheIDOK) IsCode(code int) bool { - return code == 200 -} - -func (o *GetFqdnCacheIDOK) Error() string { - return fmt.Sprintf("[GET /fqdn/cache/{id}][%d] getFqdnCacheIdOK %+v", 200, o.Payload) -} - -func (o *GetFqdnCacheIDOK) String() string { - return fmt.Sprintf("[GET /fqdn/cache/{id}][%d] getFqdnCacheIdOK %+v", 200, o.Payload) -} - -func (o *GetFqdnCacheIDOK) GetPayload() []*models.DNSLookup { - return o.Payload -} - -func (o *GetFqdnCacheIDOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewGetFqdnCacheIDBadRequest creates a GetFqdnCacheIDBadRequest with default headers values -func NewGetFqdnCacheIDBadRequest() *GetFqdnCacheIDBadRequest { - return &GetFqdnCacheIDBadRequest{} -} - -/* -GetFqdnCacheIDBadRequest describes a response with status code 400, with default header values. 
- -Invalid request (error parsing parameters) -*/ -type GetFqdnCacheIDBadRequest struct { - Payload models.Error -} - -// IsSuccess returns true when this get fqdn cache Id bad request response has a 2xx status code -func (o *GetFqdnCacheIDBadRequest) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this get fqdn cache Id bad request response has a 3xx status code -func (o *GetFqdnCacheIDBadRequest) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get fqdn cache Id bad request response has a 4xx status code -func (o *GetFqdnCacheIDBadRequest) IsClientError() bool { - return true -} - -// IsServerError returns true when this get fqdn cache Id bad request response has a 5xx status code -func (o *GetFqdnCacheIDBadRequest) IsServerError() bool { - return false -} - -// IsCode returns true when this get fqdn cache Id bad request response a status code equal to that given -func (o *GetFqdnCacheIDBadRequest) IsCode(code int) bool { - return code == 400 -} - -func (o *GetFqdnCacheIDBadRequest) Error() string { - return fmt.Sprintf("[GET /fqdn/cache/{id}][%d] getFqdnCacheIdBadRequest %+v", 400, o.Payload) -} - -func (o *GetFqdnCacheIDBadRequest) String() string { - return fmt.Sprintf("[GET /fqdn/cache/{id}][%d] getFqdnCacheIdBadRequest %+v", 400, o.Payload) -} - -func (o *GetFqdnCacheIDBadRequest) GetPayload() models.Error { - return o.Payload -} - -func (o *GetFqdnCacheIDBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewGetFqdnCacheIDNotFound creates a GetFqdnCacheIDNotFound with default headers values -func NewGetFqdnCacheIDNotFound() *GetFqdnCacheIDNotFound { - return &GetFqdnCacheIDNotFound{} -} - -/* -GetFqdnCacheIDNotFound describes a response with status code 404, with default header values. 
- -No DNS data with provided parameters found -*/ -type GetFqdnCacheIDNotFound struct { -} - -// IsSuccess returns true when this get fqdn cache Id not found response has a 2xx status code -func (o *GetFqdnCacheIDNotFound) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this get fqdn cache Id not found response has a 3xx status code -func (o *GetFqdnCacheIDNotFound) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get fqdn cache Id not found response has a 4xx status code -func (o *GetFqdnCacheIDNotFound) IsClientError() bool { - return true -} - -// IsServerError returns true when this get fqdn cache Id not found response has a 5xx status code -func (o *GetFqdnCacheIDNotFound) IsServerError() bool { - return false -} - -// IsCode returns true when this get fqdn cache Id not found response a status code equal to that given -func (o *GetFqdnCacheIDNotFound) IsCode(code int) bool { - return code == 404 -} - -func (o *GetFqdnCacheIDNotFound) Error() string { - return fmt.Sprintf("[GET /fqdn/cache/{id}][%d] getFqdnCacheIdNotFound ", 404) -} - -func (o *GetFqdnCacheIDNotFound) String() string { - return fmt.Sprintf("[GET /fqdn/cache/{id}][%d] getFqdnCacheIdNotFound ", 404) -} - -func (o *GetFqdnCacheIDNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_cache_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_cache_parameters.go deleted file mode 100644 index d6eb10e11..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_cache_parameters.go +++ /dev/null @@ -1,234 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package policy - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" -) - -// NewGetFqdnCacheParams creates a new GetFqdnCacheParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewGetFqdnCacheParams() *GetFqdnCacheParams { - return &GetFqdnCacheParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewGetFqdnCacheParamsWithTimeout creates a new GetFqdnCacheParams object -// with the ability to set a timeout on a request. -func NewGetFqdnCacheParamsWithTimeout(timeout time.Duration) *GetFqdnCacheParams { - return &GetFqdnCacheParams{ - timeout: timeout, - } -} - -// NewGetFqdnCacheParamsWithContext creates a new GetFqdnCacheParams object -// with the ability to set a context for a request. -func NewGetFqdnCacheParamsWithContext(ctx context.Context) *GetFqdnCacheParams { - return &GetFqdnCacheParams{ - Context: ctx, - } -} - -// NewGetFqdnCacheParamsWithHTTPClient creates a new GetFqdnCacheParams object -// with the ability to set a custom HTTPClient for a request. 
-func NewGetFqdnCacheParamsWithHTTPClient(client *http.Client) *GetFqdnCacheParams { - return &GetFqdnCacheParams{ - HTTPClient: client, - } -} - -/* -GetFqdnCacheParams contains all the parameters to send to the API endpoint - - for the get fqdn cache operation. - - Typically these are written to a http.Request. -*/ -type GetFqdnCacheParams struct { - - /* Cidr. - - A CIDR range of IPs - */ - Cidr *string - - /* Matchpattern. - - A toFQDNs compatible matchPattern expression - */ - Matchpattern *string - - /* Source. - - Source from which FQDN entries come from - */ - Source *string - - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the get fqdn cache params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetFqdnCacheParams) WithDefaults() *GetFqdnCacheParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the get fqdn cache params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetFqdnCacheParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the get fqdn cache params -func (o *GetFqdnCacheParams) WithTimeout(timeout time.Duration) *GetFqdnCacheParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the get fqdn cache params -func (o *GetFqdnCacheParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the get fqdn cache params -func (o *GetFqdnCacheParams) WithContext(ctx context.Context) *GetFqdnCacheParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the get fqdn cache params -func (o *GetFqdnCacheParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the get fqdn cache params -func (o *GetFqdnCacheParams) WithHTTPClient(client *http.Client) *GetFqdnCacheParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the get fqdn cache params -func (o *GetFqdnCacheParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WithCidr adds the cidr to the get fqdn cache params -func (o *GetFqdnCacheParams) WithCidr(cidr *string) *GetFqdnCacheParams { - o.SetCidr(cidr) - return o -} - -// SetCidr adds the cidr to the get fqdn cache params -func (o *GetFqdnCacheParams) SetCidr(cidr *string) { - o.Cidr = cidr -} - -// WithMatchpattern adds the matchpattern to the get fqdn cache params -func (o *GetFqdnCacheParams) WithMatchpattern(matchpattern *string) *GetFqdnCacheParams { - o.SetMatchpattern(matchpattern) - return o -} - -// SetMatchpattern adds the matchpattern to the get fqdn cache params -func (o *GetFqdnCacheParams) SetMatchpattern(matchpattern *string) { - o.Matchpattern = matchpattern -} - -// WithSource adds the source to the get fqdn cache params -func (o *GetFqdnCacheParams) WithSource(source *string) *GetFqdnCacheParams { - o.SetSource(source) - return o -} - -// SetSource adds the source to the get fqdn cache params -func (o *GetFqdnCacheParams) SetSource(source *string) { - o.Source = source -} - -// WriteToRequest writes these params to a swagger request -func (o *GetFqdnCacheParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - - if o.Cidr != nil { - - // query param cidr - var qrCidr string - - if 
o.Cidr != nil { - qrCidr = *o.Cidr - } - qCidr := qrCidr - if qCidr != "" { - - if err := r.SetQueryParam("cidr", qCidr); err != nil { - return err - } - } - } - - if o.Matchpattern != nil { - - // query param matchpattern - var qrMatchpattern string - - if o.Matchpattern != nil { - qrMatchpattern = *o.Matchpattern - } - qMatchpattern := qrMatchpattern - if qMatchpattern != "" { - - if err := r.SetQueryParam("matchpattern", qMatchpattern); err != nil { - return err - } - } - } - - if o.Source != nil { - - // query param source - var qrSource string - - if o.Source != nil { - qrSource = *o.Source - } - qSource := qrSource - if qSource != "" { - - if err := r.SetQueryParam("source", qSource); err != nil { - return err - } - } - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_cache_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_cache_responses.go deleted file mode 100644 index 775a0fc20..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_cache_responses.go +++ /dev/null @@ -1,223 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package policy - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// GetFqdnCacheReader is a Reader for the GetFqdnCache structure. -type GetFqdnCacheReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. -func (o *GetFqdnCacheReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewGetFqdnCacheOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 400: - result := NewGetFqdnCacheBadRequest() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 404: - result := NewGetFqdnCacheNotFound() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewGetFqdnCacheOK creates a GetFqdnCacheOK with default headers values -func NewGetFqdnCacheOK() *GetFqdnCacheOK { - return &GetFqdnCacheOK{} -} - -/* -GetFqdnCacheOK describes a response with status code 200, with default header values. 
- -Success -*/ -type GetFqdnCacheOK struct { - Payload []*models.DNSLookup -} - -// IsSuccess returns true when this get fqdn cache o k response has a 2xx status code -func (o *GetFqdnCacheOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this get fqdn cache o k response has a 3xx status code -func (o *GetFqdnCacheOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get fqdn cache o k response has a 4xx status code -func (o *GetFqdnCacheOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this get fqdn cache o k response has a 5xx status code -func (o *GetFqdnCacheOK) IsServerError() bool { - return false -} - -// IsCode returns true when this get fqdn cache o k response a status code equal to that given -func (o *GetFqdnCacheOK) IsCode(code int) bool { - return code == 200 -} - -func (o *GetFqdnCacheOK) Error() string { - return fmt.Sprintf("[GET /fqdn/cache][%d] getFqdnCacheOK %+v", 200, o.Payload) -} - -func (o *GetFqdnCacheOK) String() string { - return fmt.Sprintf("[GET /fqdn/cache][%d] getFqdnCacheOK %+v", 200, o.Payload) -} - -func (o *GetFqdnCacheOK) GetPayload() []*models.DNSLookup { - return o.Payload -} - -func (o *GetFqdnCacheOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewGetFqdnCacheBadRequest creates a GetFqdnCacheBadRequest with default headers values -func NewGetFqdnCacheBadRequest() *GetFqdnCacheBadRequest { - return &GetFqdnCacheBadRequest{} -} - -/* -GetFqdnCacheBadRequest describes a response with status code 400, with default header values. 
- -Invalid request (error parsing parameters) -*/ -type GetFqdnCacheBadRequest struct { - Payload models.Error -} - -// IsSuccess returns true when this get fqdn cache bad request response has a 2xx status code -func (o *GetFqdnCacheBadRequest) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this get fqdn cache bad request response has a 3xx status code -func (o *GetFqdnCacheBadRequest) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get fqdn cache bad request response has a 4xx status code -func (o *GetFqdnCacheBadRequest) IsClientError() bool { - return true -} - -// IsServerError returns true when this get fqdn cache bad request response has a 5xx status code -func (o *GetFqdnCacheBadRequest) IsServerError() bool { - return false -} - -// IsCode returns true when this get fqdn cache bad request response a status code equal to that given -func (o *GetFqdnCacheBadRequest) IsCode(code int) bool { - return code == 400 -} - -func (o *GetFqdnCacheBadRequest) Error() string { - return fmt.Sprintf("[GET /fqdn/cache][%d] getFqdnCacheBadRequest %+v", 400, o.Payload) -} - -func (o *GetFqdnCacheBadRequest) String() string { - return fmt.Sprintf("[GET /fqdn/cache][%d] getFqdnCacheBadRequest %+v", 400, o.Payload) -} - -func (o *GetFqdnCacheBadRequest) GetPayload() models.Error { - return o.Payload -} - -func (o *GetFqdnCacheBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewGetFqdnCacheNotFound creates a GetFqdnCacheNotFound with default headers values -func NewGetFqdnCacheNotFound() *GetFqdnCacheNotFound { - return &GetFqdnCacheNotFound{} -} - -/* -GetFqdnCacheNotFound describes a response with status code 404, with default header values. 
- -No DNS data with provided parameters found -*/ -type GetFqdnCacheNotFound struct { -} - -// IsSuccess returns true when this get fqdn cache not found response has a 2xx status code -func (o *GetFqdnCacheNotFound) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this get fqdn cache not found response has a 3xx status code -func (o *GetFqdnCacheNotFound) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get fqdn cache not found response has a 4xx status code -func (o *GetFqdnCacheNotFound) IsClientError() bool { - return true -} - -// IsServerError returns true when this get fqdn cache not found response has a 5xx status code -func (o *GetFqdnCacheNotFound) IsServerError() bool { - return false -} - -// IsCode returns true when this get fqdn cache not found response a status code equal to that given -func (o *GetFqdnCacheNotFound) IsCode(code int) bool { - return code == 404 -} - -func (o *GetFqdnCacheNotFound) Error() string { - return fmt.Sprintf("[GET /fqdn/cache][%d] getFqdnCacheNotFound ", 404) -} - -func (o *GetFqdnCacheNotFound) String() string { - return fmt.Sprintf("[GET /fqdn/cache][%d] getFqdnCacheNotFound ", 404) -} - -func (o *GetFqdnCacheNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_names_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_names_parameters.go deleted file mode 100644 index 1b6743281..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_names_parameters.go +++ /dev/null @@ -1,131 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package policy - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" -) - -// NewGetFqdnNamesParams creates a new GetFqdnNamesParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewGetFqdnNamesParams() *GetFqdnNamesParams { - return &GetFqdnNamesParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewGetFqdnNamesParamsWithTimeout creates a new GetFqdnNamesParams object -// with the ability to set a timeout on a request. -func NewGetFqdnNamesParamsWithTimeout(timeout time.Duration) *GetFqdnNamesParams { - return &GetFqdnNamesParams{ - timeout: timeout, - } -} - -// NewGetFqdnNamesParamsWithContext creates a new GetFqdnNamesParams object -// with the ability to set a context for a request. -func NewGetFqdnNamesParamsWithContext(ctx context.Context) *GetFqdnNamesParams { - return &GetFqdnNamesParams{ - Context: ctx, - } -} - -// NewGetFqdnNamesParamsWithHTTPClient creates a new GetFqdnNamesParams object -// with the ability to set a custom HTTPClient for a request. -func NewGetFqdnNamesParamsWithHTTPClient(client *http.Client) *GetFqdnNamesParams { - return &GetFqdnNamesParams{ - HTTPClient: client, - } -} - -/* -GetFqdnNamesParams contains all the parameters to send to the API endpoint - - for the get fqdn names operation. 
- - Typically these are written to a http.Request. -*/ -type GetFqdnNamesParams struct { - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the get fqdn names params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetFqdnNamesParams) WithDefaults() *GetFqdnNamesParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the get fqdn names params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetFqdnNamesParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the get fqdn names params -func (o *GetFqdnNamesParams) WithTimeout(timeout time.Duration) *GetFqdnNamesParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the get fqdn names params -func (o *GetFqdnNamesParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the get fqdn names params -func (o *GetFqdnNamesParams) WithContext(ctx context.Context) *GetFqdnNamesParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the get fqdn names params -func (o *GetFqdnNamesParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the get fqdn names params -func (o *GetFqdnNamesParams) WithHTTPClient(client *http.Client) *GetFqdnNamesParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the get fqdn names params -func (o *GetFqdnNamesParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WriteToRequest writes these params to a swagger request -func (o *GetFqdnNamesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_names_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_names_responses.go deleted file mode 100644 index b2b160db6..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_fqdn_names_responses.go +++ /dev/null @@ -1,168 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package policy - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// GetFqdnNamesReader is a Reader for the GetFqdnNames structure. -type GetFqdnNamesReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. 
-func (o *GetFqdnNamesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewGetFqdnNamesOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 400: - result := NewGetFqdnNamesBadRequest() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewGetFqdnNamesOK creates a GetFqdnNamesOK with default headers values -func NewGetFqdnNamesOK() *GetFqdnNamesOK { - return &GetFqdnNamesOK{} -} - -/* -GetFqdnNamesOK describes a response with status code 200, with default header values. - -Success -*/ -type GetFqdnNamesOK struct { - Payload *models.NameManager -} - -// IsSuccess returns true when this get fqdn names o k response has a 2xx status code -func (o *GetFqdnNamesOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this get fqdn names o k response has a 3xx status code -func (o *GetFqdnNamesOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get fqdn names o k response has a 4xx status code -func (o *GetFqdnNamesOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this get fqdn names o k response has a 5xx status code -func (o *GetFqdnNamesOK) IsServerError() bool { - return false -} - -// IsCode returns true when this get fqdn names o k response a status code equal to that given -func (o *GetFqdnNamesOK) IsCode(code int) bool { - return code == 200 -} - -func (o *GetFqdnNamesOK) Error() string { - return fmt.Sprintf("[GET /fqdn/names][%d] getFqdnNamesOK %+v", 200, o.Payload) -} - -func (o *GetFqdnNamesOK) String() string { - return fmt.Sprintf("[GET /fqdn/names][%d] getFqdnNamesOK %+v", 200, o.Payload) -} - -func (o *GetFqdnNamesOK) GetPayload() *models.NameManager { - return o.Payload -} - -func (o *GetFqdnNamesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - o.Payload = new(models.NameManager) - - // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewGetFqdnNamesBadRequest creates a GetFqdnNamesBadRequest with default headers values -func NewGetFqdnNamesBadRequest() *GetFqdnNamesBadRequest { - return &GetFqdnNamesBadRequest{} -} - -/* -GetFqdnNamesBadRequest describes a response with status code 400, with default header values. 
- -Invalid request (error parsing parameters) -*/ -type GetFqdnNamesBadRequest struct { - Payload models.Error -} - -// IsSuccess returns true when this get fqdn names bad request response has a 2xx status code -func (o *GetFqdnNamesBadRequest) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this get fqdn names bad request response has a 3xx status code -func (o *GetFqdnNamesBadRequest) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get fqdn names bad request response has a 4xx status code -func (o *GetFqdnNamesBadRequest) IsClientError() bool { - return true -} - -// IsServerError returns true when this get fqdn names bad request response has a 5xx status code -func (o *GetFqdnNamesBadRequest) IsServerError() bool { - return false -} - -// IsCode returns true when this get fqdn names bad request response a status code equal to that given -func (o *GetFqdnNamesBadRequest) IsCode(code int) bool { - return code == 400 -} - -func (o *GetFqdnNamesBadRequest) Error() string { - return fmt.Sprintf("[GET /fqdn/names][%d] getFqdnNamesBadRequest %+v", 400, o.Payload) -} - -func (o *GetFqdnNamesBadRequest) String() string { - return fmt.Sprintf("[GET /fqdn/names][%d] getFqdnNamesBadRequest %+v", 400, o.Payload) -} - -func (o *GetFqdnNamesBadRequest) GetPayload() models.Error { - return o.Payload -} - -func (o *GetFqdnNamesBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_endpoints_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_endpoints_parameters.go deleted file mode 100644 index 6a2182829..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_endpoints_parameters.go +++ /dev/null @@ -1,131 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package policy - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" -) - -// NewGetIdentityEndpointsParams creates a new GetIdentityEndpointsParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewGetIdentityEndpointsParams() *GetIdentityEndpointsParams { - return &GetIdentityEndpointsParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewGetIdentityEndpointsParamsWithTimeout creates a new GetIdentityEndpointsParams object -// with the ability to set a timeout on a request. -func NewGetIdentityEndpointsParamsWithTimeout(timeout time.Duration) *GetIdentityEndpointsParams { - return &GetIdentityEndpointsParams{ - timeout: timeout, - } -} - -// NewGetIdentityEndpointsParamsWithContext creates a new GetIdentityEndpointsParams object -// with the ability to set a context for a request. 
-func NewGetIdentityEndpointsParamsWithContext(ctx context.Context) *GetIdentityEndpointsParams { - return &GetIdentityEndpointsParams{ - Context: ctx, - } -} - -// NewGetIdentityEndpointsParamsWithHTTPClient creates a new GetIdentityEndpointsParams object -// with the ability to set a custom HTTPClient for a request. -func NewGetIdentityEndpointsParamsWithHTTPClient(client *http.Client) *GetIdentityEndpointsParams { - return &GetIdentityEndpointsParams{ - HTTPClient: client, - } -} - -/* -GetIdentityEndpointsParams contains all the parameters to send to the API endpoint - - for the get identity endpoints operation. - - Typically these are written to a http.Request. -*/ -type GetIdentityEndpointsParams struct { - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the get identity endpoints params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetIdentityEndpointsParams) WithDefaults() *GetIdentityEndpointsParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the get identity endpoints params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetIdentityEndpointsParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the get identity endpoints params -func (o *GetIdentityEndpointsParams) WithTimeout(timeout time.Duration) *GetIdentityEndpointsParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the get identity endpoints params -func (o *GetIdentityEndpointsParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the get identity endpoints params -func (o *GetIdentityEndpointsParams) WithContext(ctx context.Context) *GetIdentityEndpointsParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the get identity endpoints params -func (o *GetIdentityEndpointsParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the get identity endpoints params -func (o *GetIdentityEndpointsParams) WithHTTPClient(client *http.Client) *GetIdentityEndpointsParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the get identity endpoints params -func (o *GetIdentityEndpointsParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WriteToRequest writes these params to a swagger request -func (o *GetIdentityEndpointsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_endpoints_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_endpoints_responses.go deleted file mode 100644 index 0608766c8..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_endpoints_responses.go +++ /dev/null @@ -1,156 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package policy - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// GetIdentityEndpointsReader is a Reader for the GetIdentityEndpoints structure. -type GetIdentityEndpointsReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. -func (o *GetIdentityEndpointsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewGetIdentityEndpointsOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 404: - result := NewGetIdentityEndpointsNotFound() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewGetIdentityEndpointsOK creates a GetIdentityEndpointsOK with default headers values -func NewGetIdentityEndpointsOK() *GetIdentityEndpointsOK { - return &GetIdentityEndpointsOK{} -} - -/* -GetIdentityEndpointsOK describes a response with status code 200, with default header values. - -Success -*/ -type GetIdentityEndpointsOK struct { - Payload []*models.IdentityEndpoints -} - -// IsSuccess returns true when this get identity endpoints o k response has a 2xx status code -func (o *GetIdentityEndpointsOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this get identity endpoints o k response has a 3xx status code -func (o *GetIdentityEndpointsOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get identity endpoints o k response has a 4xx status code -func (o *GetIdentityEndpointsOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this get identity endpoints o k response has a 5xx status code -func (o *GetIdentityEndpointsOK) IsServerError() bool { - return false -} - -// IsCode returns true when this get identity endpoints o k response a status code equal to that given -func (o *GetIdentityEndpointsOK) IsCode(code int) bool { - return code == 200 -} - -func (o *GetIdentityEndpointsOK) Error() string { - return fmt.Sprintf("[GET /identity/endpoints][%d] getIdentityEndpointsOK %+v", 200, o.Payload) -} - -func (o *GetIdentityEndpointsOK) String() string { - return fmt.Sprintf("[GET /identity/endpoints][%d] getIdentityEndpointsOK %+v", 200, o.Payload) -} - -func (o *GetIdentityEndpointsOK) GetPayload() []*models.IdentityEndpoints { - return o.Payload -} - -func (o *GetIdentityEndpointsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewGetIdentityEndpointsNotFound creates a GetIdentityEndpointsNotFound with default headers values -func NewGetIdentityEndpointsNotFound() *GetIdentityEndpointsNotFound { - return &GetIdentityEndpointsNotFound{} -} - -/* -GetIdentityEndpointsNotFound describes a response with status code 404, with default header values. - -Set of identities which are being used by local endpoints could not be found. 
-*/ -type GetIdentityEndpointsNotFound struct { -} - -// IsSuccess returns true when this get identity endpoints not found response has a 2xx status code -func (o *GetIdentityEndpointsNotFound) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this get identity endpoints not found response has a 3xx status code -func (o *GetIdentityEndpointsNotFound) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get identity endpoints not found response has a 4xx status code -func (o *GetIdentityEndpointsNotFound) IsClientError() bool { - return true -} - -// IsServerError returns true when this get identity endpoints not found response has a 5xx status code -func (o *GetIdentityEndpointsNotFound) IsServerError() bool { - return false -} - -// IsCode returns true when this get identity endpoints not found response a status code equal to that given -func (o *GetIdentityEndpointsNotFound) IsCode(code int) bool { - return code == 404 -} - -func (o *GetIdentityEndpointsNotFound) Error() string { - return fmt.Sprintf("[GET /identity/endpoints][%d] getIdentityEndpointsNotFound ", 404) -} - -func (o *GetIdentityEndpointsNotFound) String() string { - return fmt.Sprintf("[GET /identity/endpoints][%d] getIdentityEndpointsNotFound ", 404) -} - -func (o *GetIdentityEndpointsNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_id_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_id_parameters.go deleted file mode 100644 index f17fa6651..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_id_parameters.go +++ /dev/null @@ -1,155 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package policy - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" -) - -// NewGetIdentityIDParams creates a new GetIdentityIDParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewGetIdentityIDParams() *GetIdentityIDParams { - return &GetIdentityIDParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewGetIdentityIDParamsWithTimeout creates a new GetIdentityIDParams object -// with the ability to set a timeout on a request. -func NewGetIdentityIDParamsWithTimeout(timeout time.Duration) *GetIdentityIDParams { - return &GetIdentityIDParams{ - timeout: timeout, - } -} - -// NewGetIdentityIDParamsWithContext creates a new GetIdentityIDParams object -// with the ability to set a context for a request. -func NewGetIdentityIDParamsWithContext(ctx context.Context) *GetIdentityIDParams { - return &GetIdentityIDParams{ - Context: ctx, - } -} - -// NewGetIdentityIDParamsWithHTTPClient creates a new GetIdentityIDParams object -// with the ability to set a custom HTTPClient for a request. 
-func NewGetIdentityIDParamsWithHTTPClient(client *http.Client) *GetIdentityIDParams { - return &GetIdentityIDParams{ - HTTPClient: client, - } -} - -/* -GetIdentityIDParams contains all the parameters to send to the API endpoint - - for the get identity ID operation. - - Typically these are written to a http.Request. -*/ -type GetIdentityIDParams struct { - - /* ID. - - Cluster wide unique identifier of a security identity. - - */ - ID string - - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the get identity ID params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetIdentityIDParams) WithDefaults() *GetIdentityIDParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the get identity ID params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetIdentityIDParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the get identity ID params -func (o *GetIdentityIDParams) WithTimeout(timeout time.Duration) *GetIdentityIDParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the get identity ID params -func (o *GetIdentityIDParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the get identity ID params -func (o *GetIdentityIDParams) WithContext(ctx context.Context) *GetIdentityIDParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the get identity ID params -func (o *GetIdentityIDParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the get identity ID params -func (o *GetIdentityIDParams) WithHTTPClient(client *http.Client) *GetIdentityIDParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the get identity ID params -func (o *GetIdentityIDParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WithID adds the id to the get identity ID params -func (o *GetIdentityIDParams) WithID(id string) *GetIdentityIDParams { - o.SetID(id) - return o -} - -// SetID adds the id to the get identity ID params -func (o *GetIdentityIDParams) SetID(id string) { - o.ID = id -} - -// WriteToRequest writes these params to a swagger request -func (o *GetIdentityIDParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - - // path param id - if err := r.SetPathParam("id", o.ID); err != nil { - return err - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_id_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_id_responses.go deleted file mode 100644 index c6d55d0ca..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_id_responses.go +++ /dev/null @@ -1,349 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package policy - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// GetIdentityIDReader is a Reader for the GetIdentityID structure. -type GetIdentityIDReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. -func (o *GetIdentityIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewGetIdentityIDOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 400: - result := NewGetIdentityIDBadRequest() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 404: - result := NewGetIdentityIDNotFound() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 520: - result := NewGetIdentityIDUnreachable() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 521: - result := NewGetIdentityIDInvalidStorageFormat() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewGetIdentityIDOK creates a GetIdentityIDOK with default headers values -func NewGetIdentityIDOK() *GetIdentityIDOK { - return &GetIdentityIDOK{} -} - -/* -GetIdentityIDOK describes a response with status code 200, with default header values. 
- -Success -*/ -type GetIdentityIDOK struct { - Payload *models.Identity -} - -// IsSuccess returns true when this get identity Id o k response has a 2xx status code -func (o *GetIdentityIDOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this get identity Id o k response has a 3xx status code -func (o *GetIdentityIDOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get identity Id o k response has a 4xx status code -func (o *GetIdentityIDOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this get identity Id o k response has a 5xx status code -func (o *GetIdentityIDOK) IsServerError() bool { - return false -} - -// IsCode returns true when this get identity Id o k response a status code equal to that given -func (o *GetIdentityIDOK) IsCode(code int) bool { - return code == 200 -} - -func (o *GetIdentityIDOK) Error() string { - return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdOK %+v", 200, o.Payload) -} - -func (o *GetIdentityIDOK) String() string { - return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdOK %+v", 200, o.Payload) -} - -func (o *GetIdentityIDOK) GetPayload() *models.Identity { - return o.Payload -} - -func (o *GetIdentityIDOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - o.Payload = new(models.Identity) - - // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewGetIdentityIDBadRequest creates a GetIdentityIDBadRequest with default headers values -func NewGetIdentityIDBadRequest() *GetIdentityIDBadRequest { - return &GetIdentityIDBadRequest{} -} - -/* -GetIdentityIDBadRequest describes a response with status code 400, with default header values. - -Invalid identity provided -*/ -type GetIdentityIDBadRequest struct { -} - -// IsSuccess returns true when this get identity Id bad request response has a 2xx status code -func (o *GetIdentityIDBadRequest) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this get identity Id bad request response has a 3xx status code -func (o *GetIdentityIDBadRequest) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get identity Id bad request response has a 4xx status code -func (o *GetIdentityIDBadRequest) IsClientError() bool { - return true -} - -// IsServerError returns true when this get identity Id bad request response has a 5xx status code -func (o *GetIdentityIDBadRequest) IsServerError() bool { - return false -} - -// IsCode returns true when this get identity Id bad request response a status code equal to that given -func (o *GetIdentityIDBadRequest) IsCode(code int) bool { - return code == 400 -} - -func (o *GetIdentityIDBadRequest) Error() string { - return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdBadRequest ", 400) -} - -func (o *GetIdentityIDBadRequest) String() string { - return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdBadRequest ", 400) -} - -func (o *GetIdentityIDBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} - -// NewGetIdentityIDNotFound creates a GetIdentityIDNotFound with default headers values -func NewGetIdentityIDNotFound() *GetIdentityIDNotFound { - return &GetIdentityIDNotFound{} -} - -/* -GetIdentityIDNotFound describes a response with status code 404, with default header values. 
- -Identity not found -*/ -type GetIdentityIDNotFound struct { -} - -// IsSuccess returns true when this get identity Id not found response has a 2xx status code -func (o *GetIdentityIDNotFound) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this get identity Id not found response has a 3xx status code -func (o *GetIdentityIDNotFound) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get identity Id not found response has a 4xx status code -func (o *GetIdentityIDNotFound) IsClientError() bool { - return true -} - -// IsServerError returns true when this get identity Id not found response has a 5xx status code -func (o *GetIdentityIDNotFound) IsServerError() bool { - return false -} - -// IsCode returns true when this get identity Id not found response a status code equal to that given -func (o *GetIdentityIDNotFound) IsCode(code int) bool { - return code == 404 -} - -func (o *GetIdentityIDNotFound) Error() string { - return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdNotFound ", 404) -} - -func (o *GetIdentityIDNotFound) String() string { - return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdNotFound ", 404) -} - -func (o *GetIdentityIDNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} - -// NewGetIdentityIDUnreachable creates a GetIdentityIDUnreachable with default headers values -func NewGetIdentityIDUnreachable() *GetIdentityIDUnreachable { - return &GetIdentityIDUnreachable{} -} - -/* -GetIdentityIDUnreachable describes a response with status code 520, with default header values. - -Identity storage unreachable. Likely a network problem. -*/ -type GetIdentityIDUnreachable struct { - Payload models.Error -} - -// IsSuccess returns true when this get identity Id unreachable response has a 2xx status code -func (o *GetIdentityIDUnreachable) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this get identity Id unreachable response has a 3xx status code -func (o *GetIdentityIDUnreachable) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get identity Id unreachable response has a 4xx status code -func (o *GetIdentityIDUnreachable) IsClientError() bool { - return false -} - -// IsServerError returns true when this get identity Id unreachable response has a 5xx status code -func (o *GetIdentityIDUnreachable) IsServerError() bool { - return true -} - -// IsCode returns true when this get identity Id unreachable response a status code equal to that given -func (o *GetIdentityIDUnreachable) IsCode(code int) bool { - return code == 520 -} - -func (o *GetIdentityIDUnreachable) Error() string { - return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdUnreachable %+v", 520, o.Payload) -} - -func (o *GetIdentityIDUnreachable) String() string { - return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdUnreachable %+v", 520, o.Payload) -} - -func (o *GetIdentityIDUnreachable) GetPayload() models.Error { - return o.Payload -} - -func (o *GetIdentityIDUnreachable) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewGetIdentityIDInvalidStorageFormat creates a GetIdentityIDInvalidStorageFormat with default headers values -func NewGetIdentityIDInvalidStorageFormat() 
*GetIdentityIDInvalidStorageFormat { - return &GetIdentityIDInvalidStorageFormat{} -} - -/* -GetIdentityIDInvalidStorageFormat describes a response with status code 521, with default header values. - -Invalid identity format in storage -*/ -type GetIdentityIDInvalidStorageFormat struct { - Payload models.Error -} - -// IsSuccess returns true when this get identity Id invalid storage format response has a 2xx status code -func (o *GetIdentityIDInvalidStorageFormat) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this get identity Id invalid storage format response has a 3xx status code -func (o *GetIdentityIDInvalidStorageFormat) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get identity Id invalid storage format response has a 4xx status code -func (o *GetIdentityIDInvalidStorageFormat) IsClientError() bool { - return false -} - -// IsServerError returns true when this get identity Id invalid storage format response has a 5xx status code -func (o *GetIdentityIDInvalidStorageFormat) IsServerError() bool { - return true -} - -// IsCode returns true when this get identity Id invalid storage format response a status code equal to that given -func (o *GetIdentityIDInvalidStorageFormat) IsCode(code int) bool { - return code == 521 -} - -func (o *GetIdentityIDInvalidStorageFormat) Error() string { - return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdInvalidStorageFormat %+v", 521, o.Payload) -} - -func (o *GetIdentityIDInvalidStorageFormat) String() string { - return fmt.Sprintf("[GET /identity/{id}][%d] getIdentityIdInvalidStorageFormat %+v", 521, o.Payload) -} - -func (o *GetIdentityIDInvalidStorageFormat) GetPayload() models.Error { - return o.Payload -} - -func (o *GetIdentityIDInvalidStorageFormat) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_parameters.go deleted file mode 100644 index a5a0fe298..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_parameters.go +++ /dev/null @@ -1,157 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package policy - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// NewGetIdentityParams creates a new GetIdentityParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewGetIdentityParams() *GetIdentityParams { - return &GetIdentityParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewGetIdentityParamsWithTimeout creates a new GetIdentityParams object -// with the ability to set a timeout on a request. 
-func NewGetIdentityParamsWithTimeout(timeout time.Duration) *GetIdentityParams { - return &GetIdentityParams{ - timeout: timeout, - } -} - -// NewGetIdentityParamsWithContext creates a new GetIdentityParams object -// with the ability to set a context for a request. -func NewGetIdentityParamsWithContext(ctx context.Context) *GetIdentityParams { - return &GetIdentityParams{ - Context: ctx, - } -} - -// NewGetIdentityParamsWithHTTPClient creates a new GetIdentityParams object -// with the ability to set a custom HTTPClient for a request. -func NewGetIdentityParamsWithHTTPClient(client *http.Client) *GetIdentityParams { - return &GetIdentityParams{ - HTTPClient: client, - } -} - -/* -GetIdentityParams contains all the parameters to send to the API endpoint - - for the get identity operation. - - Typically these are written to a http.Request. -*/ -type GetIdentityParams struct { - - /* Labels. - - List of labels - - */ - Labels models.Labels - - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the get identity params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetIdentityParams) WithDefaults() *GetIdentityParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the get identity params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetIdentityParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the get identity params -func (o *GetIdentityParams) WithTimeout(timeout time.Duration) *GetIdentityParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the get identity params -func (o *GetIdentityParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the get identity params -func (o *GetIdentityParams) WithContext(ctx context.Context) *GetIdentityParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the get identity params -func (o *GetIdentityParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the get identity params -func (o *GetIdentityParams) WithHTTPClient(client *http.Client) *GetIdentityParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the get identity params -func (o *GetIdentityParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WithLabels adds the labels to the get identity params -func (o *GetIdentityParams) WithLabels(labels models.Labels) *GetIdentityParams { - o.SetLabels(labels) - return o -} - -// SetLabels adds the labels to the get identity params -func (o *GetIdentityParams) SetLabels(labels models.Labels) { - o.Labels = labels -} - -// WriteToRequest writes these params to a swagger request -func (o *GetIdentityParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - if o.Labels != nil { - if err := r.SetBodyParam(o.Labels); err != nil { - return err - } - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_responses.go deleted file mode 100644 index 64ea45296..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_identity_responses.go +++ /dev/null @@ -1,290 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package policy - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// GetIdentityReader is a Reader for the GetIdentity structure. -type GetIdentityReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. -func (o *GetIdentityReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewGetIdentityOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 404: - result := NewGetIdentityNotFound() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 520: - result := NewGetIdentityUnreachable() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 521: - result := NewGetIdentityInvalidStorageFormat() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewGetIdentityOK creates a GetIdentityOK with default headers values -func NewGetIdentityOK() *GetIdentityOK { - return &GetIdentityOK{} -} - -/* -GetIdentityOK describes a response with status code 200, with default header values. 
- -Success -*/ -type GetIdentityOK struct { - Payload []*models.Identity -} - -// IsSuccess returns true when this get identity o k response has a 2xx status code -func (o *GetIdentityOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this get identity o k response has a 3xx status code -func (o *GetIdentityOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get identity o k response has a 4xx status code -func (o *GetIdentityOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this get identity o k response has a 5xx status code -func (o *GetIdentityOK) IsServerError() bool { - return false -} - -// IsCode returns true when this get identity o k response a status code equal to that given -func (o *GetIdentityOK) IsCode(code int) bool { - return code == 200 -} - -func (o *GetIdentityOK) Error() string { - return fmt.Sprintf("[GET /identity][%d] getIdentityOK %+v", 200, o.Payload) -} - -func (o *GetIdentityOK) String() string { - return fmt.Sprintf("[GET /identity][%d] getIdentityOK %+v", 200, o.Payload) -} - -func (o *GetIdentityOK) GetPayload() []*models.Identity { - return o.Payload -} - -func (o *GetIdentityOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewGetIdentityNotFound creates a GetIdentityNotFound with default headers values -func NewGetIdentityNotFound() *GetIdentityNotFound { - return &GetIdentityNotFound{} -} - -/* -GetIdentityNotFound describes a response with status code 404, with default header values. - -Identities with provided parameters not found -*/ -type GetIdentityNotFound struct { -} - -// IsSuccess returns true when this get identity not found response has a 2xx status code -func (o *GetIdentityNotFound) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this get identity not found response has a 3xx status code -func (o *GetIdentityNotFound) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get identity not found response has a 4xx status code -func (o *GetIdentityNotFound) IsClientError() bool { - return true -} - -// IsServerError returns true when this get identity not found response has a 5xx status code -func (o *GetIdentityNotFound) IsServerError() bool { - return false -} - -// IsCode returns true when this get identity not found response a status code equal to that given -func (o *GetIdentityNotFound) IsCode(code int) bool { - return code == 404 -} - -func (o *GetIdentityNotFound) Error() string { - return fmt.Sprintf("[GET /identity][%d] getIdentityNotFound ", 404) -} - -func (o *GetIdentityNotFound) String() string { - return fmt.Sprintf("[GET /identity][%d] getIdentityNotFound ", 404) -} - -func (o *GetIdentityNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} - -// NewGetIdentityUnreachable creates a GetIdentityUnreachable with default headers values -func NewGetIdentityUnreachable() *GetIdentityUnreachable { - return &GetIdentityUnreachable{} -} - -/* -GetIdentityUnreachable describes a response with status code 520, with default header values. - -Identity storage unreachable. Likely a network problem. 
-*/ -type GetIdentityUnreachable struct { - Payload models.Error -} - -// IsSuccess returns true when this get identity unreachable response has a 2xx status code -func (o *GetIdentityUnreachable) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this get identity unreachable response has a 3xx status code -func (o *GetIdentityUnreachable) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get identity unreachable response has a 4xx status code -func (o *GetIdentityUnreachable) IsClientError() bool { - return false -} - -// IsServerError returns true when this get identity unreachable response has a 5xx status code -func (o *GetIdentityUnreachable) IsServerError() bool { - return true -} - -// IsCode returns true when this get identity unreachable response a status code equal to that given -func (o *GetIdentityUnreachable) IsCode(code int) bool { - return code == 520 -} - -func (o *GetIdentityUnreachable) Error() string { - return fmt.Sprintf("[GET /identity][%d] getIdentityUnreachable %+v", 520, o.Payload) -} - -func (o *GetIdentityUnreachable) String() string { - return fmt.Sprintf("[GET /identity][%d] getIdentityUnreachable %+v", 520, o.Payload) -} - -func (o *GetIdentityUnreachable) GetPayload() models.Error { - return o.Payload -} - -func (o *GetIdentityUnreachable) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewGetIdentityInvalidStorageFormat creates a GetIdentityInvalidStorageFormat with default headers values -func NewGetIdentityInvalidStorageFormat() *GetIdentityInvalidStorageFormat { - return &GetIdentityInvalidStorageFormat{} -} - -/* -GetIdentityInvalidStorageFormat describes a response with status code 521, with default header values. 
- -Invalid identity format in storage -*/ -type GetIdentityInvalidStorageFormat struct { - Payload models.Error -} - -// IsSuccess returns true when this get identity invalid storage format response has a 2xx status code -func (o *GetIdentityInvalidStorageFormat) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this get identity invalid storage format response has a 3xx status code -func (o *GetIdentityInvalidStorageFormat) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get identity invalid storage format response has a 4xx status code -func (o *GetIdentityInvalidStorageFormat) IsClientError() bool { - return false -} - -// IsServerError returns true when this get identity invalid storage format response has a 5xx status code -func (o *GetIdentityInvalidStorageFormat) IsServerError() bool { - return true -} - -// IsCode returns true when this get identity invalid storage format response a status code equal to that given -func (o *GetIdentityInvalidStorageFormat) IsCode(code int) bool { - return code == 521 -} - -func (o *GetIdentityInvalidStorageFormat) Error() string { - return fmt.Sprintf("[GET /identity][%d] getIdentityInvalidStorageFormat %+v", 521, o.Payload) -} - -func (o *GetIdentityInvalidStorageFormat) String() string { - return fmt.Sprintf("[GET /identity][%d] getIdentityInvalidStorageFormat %+v", 521, o.Payload) -} - -func (o *GetIdentityInvalidStorageFormat) GetPayload() models.Error { - return o.Payload -} - -func (o *GetIdentityInvalidStorageFormat) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_ip_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_ip_parameters.go deleted file mode 100644 index 2f7c23112..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_ip_parameters.go +++ /dev/null @@ -1,166 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package policy - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" -) - -// NewGetIPParams creates a new GetIPParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewGetIPParams() *GetIPParams { - return &GetIPParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewGetIPParamsWithTimeout creates a new GetIPParams object -// with the ability to set a timeout on a request. -func NewGetIPParamsWithTimeout(timeout time.Duration) *GetIPParams { - return &GetIPParams{ - timeout: timeout, - } -} - -// NewGetIPParamsWithContext creates a new GetIPParams object -// with the ability to set a context for a request. 
-func NewGetIPParamsWithContext(ctx context.Context) *GetIPParams { - return &GetIPParams{ - Context: ctx, - } -} - -// NewGetIPParamsWithHTTPClient creates a new GetIPParams object -// with the ability to set a custom HTTPClient for a request. -func NewGetIPParamsWithHTTPClient(client *http.Client) *GetIPParams { - return &GetIPParams{ - HTTPClient: client, - } -} - -/* -GetIPParams contains all the parameters to send to the API endpoint - - for the get IP operation. - - Typically these are written to a http.Request. -*/ -type GetIPParams struct { - - /* Cidr. - - A CIDR range of IPs - */ - Cidr *string - - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the get IP params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetIPParams) WithDefaults() *GetIPParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the get IP params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetIPParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the get IP params -func (o *GetIPParams) WithTimeout(timeout time.Duration) *GetIPParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the get IP params -func (o *GetIPParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the get IP params -func (o *GetIPParams) WithContext(ctx context.Context) *GetIPParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the get IP params -func (o *GetIPParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the get IP params -func (o *GetIPParams) WithHTTPClient(client *http.Client) *GetIPParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the get IP params -func (o *GetIPParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WithCidr adds the cidr to the get IP params -func (o *GetIPParams) WithCidr(cidr *string) *GetIPParams { - o.SetCidr(cidr) - return o -} - -// SetCidr adds the cidr to the get IP params -func (o *GetIPParams) SetCidr(cidr *string) { - o.Cidr = cidr -} - -// WriteToRequest writes these params to a swagger request -func (o *GetIPParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - - if o.Cidr != nil { - - // query param cidr - var qrCidr string - - if o.Cidr != nil { - qrCidr = *o.Cidr - } - qCidr := qrCidr - if qCidr != "" { - - if err := r.SetQueryParam("cidr", qCidr); err != nil { - return err - } - } - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_ip_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_ip_responses.go deleted file mode 100644 index 37db9ccd2..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_ip_responses.go +++ /dev/null @@ -1,223 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package policy - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// GetIPReader is a Reader for the GetIP structure. -type GetIPReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. -func (o *GetIPReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewGetIPOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 400: - result := NewGetIPBadRequest() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 404: - result := NewGetIPNotFound() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewGetIPOK creates a GetIPOK with default headers values -func NewGetIPOK() *GetIPOK { - return &GetIPOK{} -} - -/* -GetIPOK describes a response with status code 200, with default header values. - -Success -*/ -type GetIPOK struct { - Payload []*models.IPListEntry -} - -// IsSuccess returns true when this get Ip o k response has a 2xx status code -func (o *GetIPOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this get Ip o k response has a 3xx status code -func (o *GetIPOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get Ip o k response has a 4xx status code -func (o *GetIPOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this get Ip o k response has a 5xx status code -func (o *GetIPOK) IsServerError() bool { - return false -} - -// IsCode returns true when this get Ip o k response a status code equal to that given -func (o *GetIPOK) IsCode(code int) bool { - return code == 200 -} - -func (o *GetIPOK) Error() string { - return fmt.Sprintf("[GET /ip][%d] getIpOK %+v", 200, o.Payload) -} - -func (o *GetIPOK) String() string { - return fmt.Sprintf("[GET /ip][%d] getIpOK %+v", 200, o.Payload) -} - -func (o *GetIPOK) GetPayload() []*models.IPListEntry { - return o.Payload -} - -func (o *GetIPOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewGetIPBadRequest creates a GetIPBadRequest with default headers values -func NewGetIPBadRequest() *GetIPBadRequest { - return &GetIPBadRequest{} -} - -/* -GetIPBadRequest describes a response with status code 400, with default header values. 
- -Invalid request (error parsing parameters) -*/ -type GetIPBadRequest struct { - Payload models.Error -} - -// IsSuccess returns true when this get Ip bad request response has a 2xx status code -func (o *GetIPBadRequest) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this get Ip bad request response has a 3xx status code -func (o *GetIPBadRequest) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get Ip bad request response has a 4xx status code -func (o *GetIPBadRequest) IsClientError() bool { - return true -} - -// IsServerError returns true when this get Ip bad request response has a 5xx status code -func (o *GetIPBadRequest) IsServerError() bool { - return false -} - -// IsCode returns true when this get Ip bad request response a status code equal to that given -func (o *GetIPBadRequest) IsCode(code int) bool { - return code == 400 -} - -func (o *GetIPBadRequest) Error() string { - return fmt.Sprintf("[GET /ip][%d] getIpBadRequest %+v", 400, o.Payload) -} - -func (o *GetIPBadRequest) String() string { - return fmt.Sprintf("[GET /ip][%d] getIpBadRequest %+v", 400, o.Payload) -} - -func (o *GetIPBadRequest) GetPayload() models.Error { - return o.Payload -} - -func (o *GetIPBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewGetIPNotFound creates a GetIPNotFound with default headers values -func NewGetIPNotFound() *GetIPNotFound { - return &GetIPNotFound{} -} - -/* -GetIPNotFound describes a response with status code 404, with default header values. - -No IP cache entries with provided parameters found -*/ -type GetIPNotFound struct { -} - -// IsSuccess returns true when this get Ip not found response has a 2xx status code -func (o *GetIPNotFound) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this get Ip not found response has a 3xx status code -func (o *GetIPNotFound) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get Ip not found response has a 4xx status code -func (o *GetIPNotFound) IsClientError() bool { - return true -} - -// IsServerError returns true when this get Ip not found response has a 5xx status code -func (o *GetIPNotFound) IsServerError() bool { - return false -} - -// IsCode returns true when this get Ip not found response a status code equal to that given -func (o *GetIPNotFound) IsCode(code int) bool { - return code == 404 -} - -func (o *GetIPNotFound) Error() string { - return fmt.Sprintf("[GET /ip][%d] getIpNotFound ", 404) -} - -func (o *GetIPNotFound) String() string { - return fmt.Sprintf("[GET /ip][%d] getIpNotFound ", 404) -} - -func (o *GetIPNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_policy_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_policy_parameters.go deleted file mode 100644 index 33c66c92a..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_policy_parameters.go +++ /dev/null @@ -1,153 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package policy - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// NewGetPolicyParams creates a new GetPolicyParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewGetPolicyParams() *GetPolicyParams { - return &GetPolicyParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewGetPolicyParamsWithTimeout creates a new GetPolicyParams object -// with the ability to set a timeout on a request. -func NewGetPolicyParamsWithTimeout(timeout time.Duration) *GetPolicyParams { - return &GetPolicyParams{ - timeout: timeout, - } -} - -// NewGetPolicyParamsWithContext creates a new GetPolicyParams object -// with the ability to set a context for a request. -func NewGetPolicyParamsWithContext(ctx context.Context) *GetPolicyParams { - return &GetPolicyParams{ - Context: ctx, - } -} - -// NewGetPolicyParamsWithHTTPClient creates a new GetPolicyParams object -// with the ability to set a custom HTTPClient for a request. -func NewGetPolicyParamsWithHTTPClient(client *http.Client) *GetPolicyParams { - return &GetPolicyParams{ - HTTPClient: client, - } -} - -/* -GetPolicyParams contains all the parameters to send to the API endpoint - - for the get policy operation. - - Typically these are written to a http.Request. -*/ -type GetPolicyParams struct { - - // Labels. - Labels models.Labels - - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the get policy params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetPolicyParams) WithDefaults() *GetPolicyParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the get policy params (not the query body). -// -// All values with no default are reset to their zero value. 
-func (o *GetPolicyParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the get policy params -func (o *GetPolicyParams) WithTimeout(timeout time.Duration) *GetPolicyParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the get policy params -func (o *GetPolicyParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the get policy params -func (o *GetPolicyParams) WithContext(ctx context.Context) *GetPolicyParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the get policy params -func (o *GetPolicyParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the get policy params -func (o *GetPolicyParams) WithHTTPClient(client *http.Client) *GetPolicyParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the get policy params -func (o *GetPolicyParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WithLabels adds the labels to the get policy params -func (o *GetPolicyParams) WithLabels(labels models.Labels) *GetPolicyParams { - o.SetLabels(labels) - return o -} - -// SetLabels adds the labels to the get policy params -func (o *GetPolicyParams) SetLabels(labels models.Labels) { - o.Labels = labels -} - -// WriteToRequest writes these params to a swagger request -func (o *GetPolicyParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - if o.Labels != nil { - if err := r.SetBodyParam(o.Labels); err != nil { - return err - } - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_policy_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_policy_responses.go deleted file mode 100644 index cba45439d..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_policy_responses.go +++ /dev/null @@ -1,158 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package policy - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// GetPolicyReader is a Reader for the GetPolicy structure. -type GetPolicyReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. 
-func (o *GetPolicyReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewGetPolicyOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 404: - result := NewGetPolicyNotFound() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewGetPolicyOK creates a GetPolicyOK with default headers values -func NewGetPolicyOK() *GetPolicyOK { - return &GetPolicyOK{} -} - -/* -GetPolicyOK describes a response with status code 200, with default header values. - -Success -*/ -type GetPolicyOK struct { - Payload *models.Policy -} - -// IsSuccess returns true when this get policy o k response has a 2xx status code -func (o *GetPolicyOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this get policy o k response has a 3xx status code -func (o *GetPolicyOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get policy o k response has a 4xx status code -func (o *GetPolicyOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this get policy o k response has a 5xx status code -func (o *GetPolicyOK) IsServerError() bool { - return false -} - -// IsCode returns true when this get policy o k response a status code equal to that given -func (o *GetPolicyOK) IsCode(code int) bool { - return code == 200 -} - -func (o *GetPolicyOK) Error() string { - return fmt.Sprintf("[GET /policy][%d] getPolicyOK %+v", 200, o.Payload) -} - -func (o *GetPolicyOK) String() string { - return fmt.Sprintf("[GET /policy][%d] getPolicyOK %+v", 200, o.Payload) -} - -func (o *GetPolicyOK) GetPayload() *models.Policy { - return o.Payload -} - -func (o *GetPolicyOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - o.Payload = new(models.Policy) - - // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewGetPolicyNotFound creates a GetPolicyNotFound with default headers values -func NewGetPolicyNotFound() *GetPolicyNotFound { - return &GetPolicyNotFound{} -} - -/* -GetPolicyNotFound describes a response with status code 404, with default header values. 
- -No policy rules found -*/ -type GetPolicyNotFound struct { -} - -// IsSuccess returns true when this get policy not found response has a 2xx status code -func (o *GetPolicyNotFound) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this get policy not found response has a 3xx status code -func (o *GetPolicyNotFound) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get policy not found response has a 4xx status code -func (o *GetPolicyNotFound) IsClientError() bool { - return true -} - -// IsServerError returns true when this get policy not found response has a 5xx status code -func (o *GetPolicyNotFound) IsServerError() bool { - return false -} - -// IsCode returns true when this get policy not found response a status code equal to that given -func (o *GetPolicyNotFound) IsCode(code int) bool { - return code == 404 -} - -func (o *GetPolicyNotFound) Error() string { - return fmt.Sprintf("[GET /policy][%d] getPolicyNotFound ", 404) -} - -func (o *GetPolicyNotFound) String() string { - return fmt.Sprintf("[GET /policy][%d] getPolicyNotFound ", 404) -} - -func (o *GetPolicyNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_policy_selectors_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_policy_selectors_parameters.go deleted file mode 100644 index 388f81f59..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_policy_selectors_parameters.go +++ /dev/null @@ -1,131 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package policy - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" -) - -// NewGetPolicySelectorsParams creates a new GetPolicySelectorsParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewGetPolicySelectorsParams() *GetPolicySelectorsParams { - return &GetPolicySelectorsParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewGetPolicySelectorsParamsWithTimeout creates a new GetPolicySelectorsParams object -// with the ability to set a timeout on a request. -func NewGetPolicySelectorsParamsWithTimeout(timeout time.Duration) *GetPolicySelectorsParams { - return &GetPolicySelectorsParams{ - timeout: timeout, - } -} - -// NewGetPolicySelectorsParamsWithContext creates a new GetPolicySelectorsParams object -// with the ability to set a context for a request. -func NewGetPolicySelectorsParamsWithContext(ctx context.Context) *GetPolicySelectorsParams { - return &GetPolicySelectorsParams{ - Context: ctx, - } -} - -// NewGetPolicySelectorsParamsWithHTTPClient creates a new GetPolicySelectorsParams object -// with the ability to set a custom HTTPClient for a request. 
-func NewGetPolicySelectorsParamsWithHTTPClient(client *http.Client) *GetPolicySelectorsParams { - return &GetPolicySelectorsParams{ - HTTPClient: client, - } -} - -/* -GetPolicySelectorsParams contains all the parameters to send to the API endpoint - - for the get policy selectors operation. - - Typically these are written to a http.Request. -*/ -type GetPolicySelectorsParams struct { - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the get policy selectors params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetPolicySelectorsParams) WithDefaults() *GetPolicySelectorsParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the get policy selectors params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetPolicySelectorsParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the get policy selectors params -func (o *GetPolicySelectorsParams) WithTimeout(timeout time.Duration) *GetPolicySelectorsParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the get policy selectors params -func (o *GetPolicySelectorsParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the get policy selectors params -func (o *GetPolicySelectorsParams) WithContext(ctx context.Context) *GetPolicySelectorsParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the get policy selectors params -func (o *GetPolicySelectorsParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the get policy selectors params -func (o *GetPolicySelectorsParams) WithHTTPClient(client *http.Client) *GetPolicySelectorsParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the get policy selectors params -func (o *GetPolicySelectorsParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WriteToRequest writes these params to a swagger request -func (o *GetPolicySelectorsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_policy_selectors_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/get_policy_selectors_responses.go deleted file mode 100644 index a1cbec4a3..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/policy/get_policy_selectors_responses.go +++ /dev/null @@ -1,99 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package policy - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// GetPolicySelectorsReader is a Reader for the GetPolicySelectors structure. -type GetPolicySelectorsReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. 
-func (o *GetPolicySelectorsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewGetPolicySelectorsOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewGetPolicySelectorsOK creates a GetPolicySelectorsOK with default headers values -func NewGetPolicySelectorsOK() *GetPolicySelectorsOK { - return &GetPolicySelectorsOK{} -} - -/* -GetPolicySelectorsOK describes a response with status code 200, with default header values. - -Success -*/ -type GetPolicySelectorsOK struct { - Payload models.SelectorCache -} - -// IsSuccess returns true when this get policy selectors o k response has a 2xx status code -func (o *GetPolicySelectorsOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this get policy selectors o k response has a 3xx status code -func (o *GetPolicySelectorsOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get policy selectors o k response has a 4xx status code -func (o *GetPolicySelectorsOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this get policy selectors o k response has a 5xx status code -func (o *GetPolicySelectorsOK) IsServerError() bool { - return false -} - -// IsCode returns true when this get policy selectors o k response a status code equal to that given -func (o *GetPolicySelectorsOK) IsCode(code int) bool { - return code == 200 -} - -func (o *GetPolicySelectorsOK) Error() string { - return fmt.Sprintf("[GET /policy/selectors][%d] getPolicySelectorsOK %+v", 200, o.Payload) -} - -func (o *GetPolicySelectorsOK) String() string { - return fmt.Sprintf("[GET /policy/selectors][%d] getPolicySelectorsOK %+v", 200, o.Payload) -} - -func (o *GetPolicySelectorsOK) GetPayload() models.SelectorCache { - return o.Payload -} - -func (o *GetPolicySelectorsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/policy_client.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/policy_client.go deleted file mode 100644 index c72927034..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/policy/policy_client.go +++ /dev/null @@ -1,548 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package policy - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" -) - -// New creates a new policy API client. 
-func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService { - return &Client{transport: transport, formats: formats} -} - -/* -Client for policy API -*/ -type Client struct { - transport runtime.ClientTransport - formats strfmt.Registry -} - -// ClientOption is the option for Client methods -type ClientOption func(*runtime.ClientOperation) - -// ClientService is the interface for Client methods -type ClientService interface { - DeleteFqdnCache(params *DeleteFqdnCacheParams, opts ...ClientOption) (*DeleteFqdnCacheOK, error) - - DeletePolicy(params *DeletePolicyParams, opts ...ClientOption) (*DeletePolicyOK, error) - - GetFqdnCache(params *GetFqdnCacheParams, opts ...ClientOption) (*GetFqdnCacheOK, error) - - GetFqdnCacheID(params *GetFqdnCacheIDParams, opts ...ClientOption) (*GetFqdnCacheIDOK, error) - - GetFqdnNames(params *GetFqdnNamesParams, opts ...ClientOption) (*GetFqdnNamesOK, error) - - GetIP(params *GetIPParams, opts ...ClientOption) (*GetIPOK, error) - - GetIdentity(params *GetIdentityParams, opts ...ClientOption) (*GetIdentityOK, error) - - GetIdentityEndpoints(params *GetIdentityEndpointsParams, opts ...ClientOption) (*GetIdentityEndpointsOK, error) - - GetIdentityID(params *GetIdentityIDParams, opts ...ClientOption) (*GetIdentityIDOK, error) - - GetPolicy(params *GetPolicyParams, opts ...ClientOption) (*GetPolicyOK, error) - - GetPolicySelectors(params *GetPolicySelectorsParams, opts ...ClientOption) (*GetPolicySelectorsOK, error) - - PutPolicy(params *PutPolicyParams, opts ...ClientOption) (*PutPolicyOK, error) - - SetTransport(transport runtime.ClientTransport) -} - -/* - DeleteFqdnCache deletes matching DNS lookups from the policy generation cache - - Deletes matching DNS lookups from the cache, optionally restricted by - -DNS name. The removed IP data will no longer be used in generated -policies. -*/ -func (a *Client) DeleteFqdnCache(params *DeleteFqdnCacheParams, opts ...ClientOption) (*DeleteFqdnCacheOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewDeleteFqdnCacheParams() - } - op := &runtime.ClientOperation{ - ID: "DeleteFqdnCache", - Method: "DELETE", - PathPattern: "/fqdn/cache", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &DeleteFqdnCacheReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*DeleteFqdnCacheOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for DeleteFqdnCache: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) - panic(msg) -} - -/* -DeletePolicy deletes a policy sub tree -*/ -func (a *Client) DeletePolicy(params *DeletePolicyParams, opts ...ClientOption) (*DeletePolicyOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewDeletePolicyParams() - } - op := &runtime.ClientOperation{ - ID: "DeletePolicy", - Method: "DELETE", - PathPattern: "/policy", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &DeletePolicyReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*DeletePolicyOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for DeletePolicy: API contract not enforced by server. Client expected to get an error, but got: %T", result) - panic(msg) -} - -/* - GetFqdnCache retrieves the list of DNS lookups intercepted from all endpoints - - Retrieves the list of DNS lookups intercepted from endpoints, - -optionally filtered by DNS name, CIDR IP range or source. -*/ -func (a *Client) GetFqdnCache(params *GetFqdnCacheParams, opts ...ClientOption) (*GetFqdnCacheOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewGetFqdnCacheParams() - } - op := &runtime.ClientOperation{ - ID: "GetFqdnCache", - Method: "GET", - PathPattern: "/fqdn/cache", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &GetFqdnCacheReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*GetFqdnCacheOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for GetFqdnCache: API contract not enforced by server. Client expected to get an error, but got: %T", result) - panic(msg) -} - -/* - GetFqdnCacheID retrieves the list of DNS lookups intercepted from an endpoint - - Retrieves the list of DNS lookups intercepted from the specific endpoint, - -optionally filtered by endpoint id, DNS name, CIDR IP range or source. 
-*/ -func (a *Client) GetFqdnCacheID(params *GetFqdnCacheIDParams, opts ...ClientOption) (*GetFqdnCacheIDOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewGetFqdnCacheIDParams() - } - op := &runtime.ClientOperation{ - ID: "GetFqdnCacheID", - Method: "GET", - PathPattern: "/fqdn/cache/{id}", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &GetFqdnCacheIDReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*GetFqdnCacheIDOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for GetFqdnCacheID: API contract not enforced by server. Client expected to get an error, but got: %T", result) - panic(msg) -} - -/* - GetFqdnNames lists internal DNS selector representations - - Retrieves the list of DNS-related fields (names to poll, selectors and - -their corresponding regexes). -*/ -func (a *Client) GetFqdnNames(params *GetFqdnNamesParams, opts ...ClientOption) (*GetFqdnNamesOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewGetFqdnNamesParams() - } - op := &runtime.ClientOperation{ - ID: "GetFqdnNames", - Method: "GET", - PathPattern: "/fqdn/names", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &GetFqdnNamesReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*GetFqdnNamesOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for GetFqdnNames: API contract not enforced by server. Client expected to get an error, but got: %T", result) - panic(msg) -} - -/* - GetIP lists information about known IP addresses - - Retrieves a list of IPs with known associated information such as - -their identities, host addresses, Kubernetes pod names, etc. -The list can optionally filtered by a CIDR IP range. 
-*/ -func (a *Client) GetIP(params *GetIPParams, opts ...ClientOption) (*GetIPOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewGetIPParams() - } - op := &runtime.ClientOperation{ - ID: "GetIP", - Method: "GET", - PathPattern: "/ip", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &GetIPReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*GetIPOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for GetIP: API contract not enforced by server. Client expected to get an error, but got: %T", result) - panic(msg) -} - -/* -GetIdentity retrieves a list of identities that have metadata matching the provided parameters - -Retrieves a list of identities that have metadata matching the provided parameters, or all identities if no parameters are provided. -*/ -func (a *Client) GetIdentity(params *GetIdentityParams, opts ...ClientOption) (*GetIdentityOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewGetIdentityParams() - } - op := &runtime.ClientOperation{ - ID: "GetIdentity", - Method: "GET", - PathPattern: "/identity", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &GetIdentityReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*GetIdentityOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for GetIdentity: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) - panic(msg) -} - -/* -GetIdentityEndpoints retrieves identities which are being used by local endpoints -*/ -func (a *Client) GetIdentityEndpoints(params *GetIdentityEndpointsParams, opts ...ClientOption) (*GetIdentityEndpointsOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewGetIdentityEndpointsParams() - } - op := &runtime.ClientOperation{ - ID: "GetIdentityEndpoints", - Method: "GET", - PathPattern: "/identity/endpoints", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &GetIdentityEndpointsReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*GetIdentityEndpointsOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for GetIdentityEndpoints: API contract not enforced by server. Client expected to get an error, but got: %T", result) - panic(msg) -} - -/* -GetIdentityID retrieves identity -*/ -func (a *Client) GetIdentityID(params *GetIdentityIDParams, opts ...ClientOption) (*GetIdentityIDOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewGetIdentityIDParams() - } - op := &runtime.ClientOperation{ - ID: "GetIdentityID", - Method: "GET", - PathPattern: "/identity/{id}", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &GetIdentityIDReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*GetIdentityIDOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for GetIdentityID: API contract not enforced by server. Client expected to get an error, but got: %T", result) - panic(msg) -} - -/* -GetPolicy retrieves entire policy tree - -Returns the entire policy tree with all children. 
-*/ -func (a *Client) GetPolicy(params *GetPolicyParams, opts ...ClientOption) (*GetPolicyOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewGetPolicyParams() - } - op := &runtime.ClientOperation{ - ID: "GetPolicy", - Method: "GET", - PathPattern: "/policy", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &GetPolicyReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*GetPolicyOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for GetPolicy: API contract not enforced by server. Client expected to get an error, but got: %T", result) - panic(msg) -} - -/* -GetPolicySelectors sees what selectors match which identities -*/ -func (a *Client) GetPolicySelectors(params *GetPolicySelectorsParams, opts ...ClientOption) (*GetPolicySelectorsOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewGetPolicySelectorsParams() - } - op := &runtime.ClientOperation{ - ID: "GetPolicySelectors", - Method: "GET", - PathPattern: "/policy/selectors", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &GetPolicySelectorsReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*GetPolicySelectorsOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for GetPolicySelectors: API contract not enforced by server. Client expected to get an error, but got: %T", result) - panic(msg) -} - -/* -PutPolicy creates or update a policy sub tree -*/ -func (a *Client) PutPolicy(params *PutPolicyParams, opts ...ClientOption) (*PutPolicyOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewPutPolicyParams() - } - op := &runtime.ClientOperation{ - ID: "PutPolicy", - Method: "PUT", - PathPattern: "/policy", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &PutPolicyReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*PutPolicyOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for PutPolicy: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) - panic(msg) -} - -// SetTransport changes the transport on the client -func (a *Client) SetTransport(transport runtime.ClientTransport) { - a.transport = transport -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/put_policy_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/put_policy_parameters.go deleted file mode 100644 index 769f669d8..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/policy/put_policy_parameters.go +++ /dev/null @@ -1,152 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package policy - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" -) - -// NewPutPolicyParams creates a new PutPolicyParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewPutPolicyParams() *PutPolicyParams { - return &PutPolicyParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewPutPolicyParamsWithTimeout creates a new PutPolicyParams object -// with the ability to set a timeout on a request. -func NewPutPolicyParamsWithTimeout(timeout time.Duration) *PutPolicyParams { - return &PutPolicyParams{ - timeout: timeout, - } -} - -// NewPutPolicyParamsWithContext creates a new PutPolicyParams object -// with the ability to set a context for a request. -func NewPutPolicyParamsWithContext(ctx context.Context) *PutPolicyParams { - return &PutPolicyParams{ - Context: ctx, - } -} - -// NewPutPolicyParamsWithHTTPClient creates a new PutPolicyParams object -// with the ability to set a custom HTTPClient for a request. -func NewPutPolicyParamsWithHTTPClient(client *http.Client) *PutPolicyParams { - return &PutPolicyParams{ - HTTPClient: client, - } -} - -/* -PutPolicyParams contains all the parameters to send to the API endpoint - - for the put policy operation. - - Typically these are written to a http.Request. -*/ -type PutPolicyParams struct { - - /* Policy. - - Policy rules - */ - Policy string - - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the put policy params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *PutPolicyParams) WithDefaults() *PutPolicyParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the put policy params (not the query body). -// -// All values with no default are reset to their zero value. 
-func (o *PutPolicyParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the put policy params -func (o *PutPolicyParams) WithTimeout(timeout time.Duration) *PutPolicyParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the put policy params -func (o *PutPolicyParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the put policy params -func (o *PutPolicyParams) WithContext(ctx context.Context) *PutPolicyParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the put policy params -func (o *PutPolicyParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the put policy params -func (o *PutPolicyParams) WithHTTPClient(client *http.Client) *PutPolicyParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the put policy params -func (o *PutPolicyParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WithPolicy adds the policy to the put policy params -func (o *PutPolicyParams) WithPolicy(policy string) *PutPolicyParams { - o.SetPolicy(policy) - return o -} - -// SetPolicy adds the policy to the put policy params -func (o *PutPolicyParams) SetPolicy(policy string) { - o.Policy = policy -} - -// WriteToRequest writes these params to a swagger request -func (o *PutPolicyParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - if err := r.SetBodyParam(o.Policy); err != nil { - return err - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/policy/put_policy_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/policy/put_policy_responses.go deleted file mode 100644 index e328a8542..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/policy/put_policy_responses.go +++ /dev/null @@ -1,302 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package policy - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// PutPolicyReader is a Reader for the PutPolicy structure. -type PutPolicyReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. 
-func (o *PutPolicyReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewPutPolicyOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 400: - result := NewPutPolicyInvalidPolicy() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 460: - result := NewPutPolicyInvalidPath() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 500: - result := NewPutPolicyFailure() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewPutPolicyOK creates a PutPolicyOK with default headers values -func NewPutPolicyOK() *PutPolicyOK { - return &PutPolicyOK{} -} - -/* -PutPolicyOK describes a response with status code 200, with default header values. - -Success -*/ -type PutPolicyOK struct { - Payload *models.Policy -} - -// IsSuccess returns true when this put policy o k response has a 2xx status code -func (o *PutPolicyOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this put policy o k response has a 3xx status code -func (o *PutPolicyOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this put policy o k response has a 4xx status code -func (o *PutPolicyOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this put policy o k response has a 5xx status code -func (o *PutPolicyOK) IsServerError() bool { - return false -} - -// IsCode returns true when this put policy o k response a status code equal to that given -func (o *PutPolicyOK) IsCode(code int) bool { - return code == 200 -} - -func (o *PutPolicyOK) Error() string { - return fmt.Sprintf("[PUT /policy][%d] putPolicyOK %+v", 200, o.Payload) -} - -func (o *PutPolicyOK) String() string { - return fmt.Sprintf("[PUT /policy][%d] putPolicyOK %+v", 200, o.Payload) -} - -func (o *PutPolicyOK) GetPayload() *models.Policy { - return o.Payload -} - -func (o *PutPolicyOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - o.Payload = new(models.Policy) - - // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewPutPolicyInvalidPolicy creates a PutPolicyInvalidPolicy with default headers values -func NewPutPolicyInvalidPolicy() *PutPolicyInvalidPolicy { - return &PutPolicyInvalidPolicy{} -} - -/* -PutPolicyInvalidPolicy describes a response with status code 400, with default header values. 
- -Invalid policy -*/ -type PutPolicyInvalidPolicy struct { - Payload models.Error -} - -// IsSuccess returns true when this put policy invalid policy response has a 2xx status code -func (o *PutPolicyInvalidPolicy) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this put policy invalid policy response has a 3xx status code -func (o *PutPolicyInvalidPolicy) IsRedirect() bool { - return false -} - -// IsClientError returns true when this put policy invalid policy response has a 4xx status code -func (o *PutPolicyInvalidPolicy) IsClientError() bool { - return true -} - -// IsServerError returns true when this put policy invalid policy response has a 5xx status code -func (o *PutPolicyInvalidPolicy) IsServerError() bool { - return false -} - -// IsCode returns true when this put policy invalid policy response a status code equal to that given -func (o *PutPolicyInvalidPolicy) IsCode(code int) bool { - return code == 400 -} - -func (o *PutPolicyInvalidPolicy) Error() string { - return fmt.Sprintf("[PUT /policy][%d] putPolicyInvalidPolicy %+v", 400, o.Payload) -} - -func (o *PutPolicyInvalidPolicy) String() string { - return fmt.Sprintf("[PUT /policy][%d] putPolicyInvalidPolicy %+v", 400, o.Payload) -} - -func (o *PutPolicyInvalidPolicy) GetPayload() models.Error { - return o.Payload -} - -func (o *PutPolicyInvalidPolicy) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewPutPolicyInvalidPath creates a PutPolicyInvalidPath with default headers values -func NewPutPolicyInvalidPath() *PutPolicyInvalidPath { - return &PutPolicyInvalidPath{} -} - -/* -PutPolicyInvalidPath describes a response with status code 460, with default header values. 
- -Invalid path -*/ -type PutPolicyInvalidPath struct { - Payload models.Error -} - -// IsSuccess returns true when this put policy invalid path response has a 2xx status code -func (o *PutPolicyInvalidPath) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this put policy invalid path response has a 3xx status code -func (o *PutPolicyInvalidPath) IsRedirect() bool { - return false -} - -// IsClientError returns true when this put policy invalid path response has a 4xx status code -func (o *PutPolicyInvalidPath) IsClientError() bool { - return true -} - -// IsServerError returns true when this put policy invalid path response has a 5xx status code -func (o *PutPolicyInvalidPath) IsServerError() bool { - return false -} - -// IsCode returns true when this put policy invalid path response a status code equal to that given -func (o *PutPolicyInvalidPath) IsCode(code int) bool { - return code == 460 -} - -func (o *PutPolicyInvalidPath) Error() string { - return fmt.Sprintf("[PUT /policy][%d] putPolicyInvalidPath %+v", 460, o.Payload) -} - -func (o *PutPolicyInvalidPath) String() string { - return fmt.Sprintf("[PUT /policy][%d] putPolicyInvalidPath %+v", 460, o.Payload) -} - -func (o *PutPolicyInvalidPath) GetPayload() models.Error { - return o.Payload -} - -func (o *PutPolicyInvalidPath) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewPutPolicyFailure creates a PutPolicyFailure with default headers values -func NewPutPolicyFailure() *PutPolicyFailure { - return &PutPolicyFailure{} -} - -/* -PutPolicyFailure describes a response with status code 500, with default header values. 
- -Policy import failed -*/ -type PutPolicyFailure struct { - Payload models.Error -} - -// IsSuccess returns true when this put policy failure response has a 2xx status code -func (o *PutPolicyFailure) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this put policy failure response has a 3xx status code -func (o *PutPolicyFailure) IsRedirect() bool { - return false -} - -// IsClientError returns true when this put policy failure response has a 4xx status code -func (o *PutPolicyFailure) IsClientError() bool { - return false -} - -// IsServerError returns true when this put policy failure response has a 5xx status code -func (o *PutPolicyFailure) IsServerError() bool { - return true -} - -// IsCode returns true when this put policy failure response a status code equal to that given -func (o *PutPolicyFailure) IsCode(code int) bool { - return code == 500 -} - -func (o *PutPolicyFailure) Error() string { - return fmt.Sprintf("[PUT /policy][%d] putPolicyFailure %+v", 500, o.Payload) -} - -func (o *PutPolicyFailure) String() string { - return fmt.Sprintf("[PUT /policy][%d] putPolicyFailure %+v", 500, o.Payload) -} - -func (o *PutPolicyFailure) GetPayload() models.Error { - return o.Payload -} - -func (o *PutPolicyFailure) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/prefilter/delete_prefilter_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/prefilter/delete_prefilter_parameters.go deleted file mode 100644 index 889f2d2e9..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/prefilter/delete_prefilter_parameters.go +++ /dev/null @@ -1,156 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package prefilter - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// NewDeletePrefilterParams creates a new DeletePrefilterParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewDeletePrefilterParams() *DeletePrefilterParams { - return &DeletePrefilterParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewDeletePrefilterParamsWithTimeout creates a new DeletePrefilterParams object -// with the ability to set a timeout on a request. -func NewDeletePrefilterParamsWithTimeout(timeout time.Duration) *DeletePrefilterParams { - return &DeletePrefilterParams{ - timeout: timeout, - } -} - -// NewDeletePrefilterParamsWithContext creates a new DeletePrefilterParams object -// with the ability to set a context for a request. -func NewDeletePrefilterParamsWithContext(ctx context.Context) *DeletePrefilterParams { - return &DeletePrefilterParams{ - Context: ctx, - } -} - -// NewDeletePrefilterParamsWithHTTPClient creates a new DeletePrefilterParams object -// with the ability to set a custom HTTPClient for a request. 
-func NewDeletePrefilterParamsWithHTTPClient(client *http.Client) *DeletePrefilterParams { - return &DeletePrefilterParams{ - HTTPClient: client, - } -} - -/* -DeletePrefilterParams contains all the parameters to send to the API endpoint - - for the delete prefilter operation. - - Typically these are written to a http.Request. -*/ -type DeletePrefilterParams struct { - - /* PrefilterSpec. - - List of CIDR ranges for filter table - */ - PrefilterSpec *models.PrefilterSpec - - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the delete prefilter params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *DeletePrefilterParams) WithDefaults() *DeletePrefilterParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the delete prefilter params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *DeletePrefilterParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the delete prefilter params -func (o *DeletePrefilterParams) WithTimeout(timeout time.Duration) *DeletePrefilterParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the delete prefilter params -func (o *DeletePrefilterParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the delete prefilter params -func (o *DeletePrefilterParams) WithContext(ctx context.Context) *DeletePrefilterParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the delete prefilter params -func (o *DeletePrefilterParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the delete prefilter params -func (o *DeletePrefilterParams) WithHTTPClient(client *http.Client) *DeletePrefilterParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the delete prefilter params -func (o *DeletePrefilterParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WithPrefilterSpec adds the prefilterSpec to the delete prefilter params -func (o *DeletePrefilterParams) WithPrefilterSpec(prefilterSpec *models.PrefilterSpec) *DeletePrefilterParams { - o.SetPrefilterSpec(prefilterSpec) - return o -} - -// SetPrefilterSpec adds the prefilterSpec to the delete prefilter params -func (o *DeletePrefilterParams) SetPrefilterSpec(prefilterSpec *models.PrefilterSpec) { - o.PrefilterSpec = prefilterSpec -} - -// WriteToRequest writes these params to a swagger request -func (o *DeletePrefilterParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - if o.PrefilterSpec != nil { - if err := r.SetBodyParam(o.PrefilterSpec); err != nil { - return err - } - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/prefilter/delete_prefilter_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/prefilter/delete_prefilter_responses.go deleted file mode 100644 index 002694d50..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/prefilter/delete_prefilter_responses.go +++ /dev/null @@ -1,235 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. 
- -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package prefilter - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// DeletePrefilterReader is a Reader for the DeletePrefilter structure. -type DeletePrefilterReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. -func (o *DeletePrefilterReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewDeletePrefilterOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 461: - result := NewDeletePrefilterInvalidCIDR() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 500: - result := NewDeletePrefilterFailure() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewDeletePrefilterOK creates a DeletePrefilterOK with default headers values -func NewDeletePrefilterOK() *DeletePrefilterOK { - return &DeletePrefilterOK{} -} - -/* -DeletePrefilterOK describes a response with status code 200, with default header values. - -Deleted -*/ -type DeletePrefilterOK struct { - Payload *models.Prefilter -} - -// IsSuccess returns true when this delete prefilter o k response has a 2xx status code -func (o *DeletePrefilterOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this delete prefilter o k response has a 3xx status code -func (o *DeletePrefilterOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this delete prefilter o k response has a 4xx status code -func (o *DeletePrefilterOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this delete prefilter o k response has a 5xx status code -func (o *DeletePrefilterOK) IsServerError() bool { - return false -} - -// IsCode returns true when this delete prefilter o k response a status code equal to that given -func (o *DeletePrefilterOK) IsCode(code int) bool { - return code == 200 -} - -func (o *DeletePrefilterOK) Error() string { - return fmt.Sprintf("[DELETE /prefilter][%d] deletePrefilterOK %+v", 200, o.Payload) -} - -func (o *DeletePrefilterOK) String() string { - return fmt.Sprintf("[DELETE /prefilter][%d] deletePrefilterOK %+v", 200, o.Payload) -} - -func (o *DeletePrefilterOK) GetPayload() *models.Prefilter { - return o.Payload -} - -func (o *DeletePrefilterOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - o.Payload = new(models.Prefilter) - - // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewDeletePrefilterInvalidCIDR creates a DeletePrefilterInvalidCIDR with default headers values -func NewDeletePrefilterInvalidCIDR() *DeletePrefilterInvalidCIDR { - return &DeletePrefilterInvalidCIDR{} -} - -/* -DeletePrefilterInvalidCIDR describes a response with 
status code 461, with default header values. - -Invalid CIDR prefix -*/ -type DeletePrefilterInvalidCIDR struct { - Payload models.Error -} - -// IsSuccess returns true when this delete prefilter invalid c Id r response has a 2xx status code -func (o *DeletePrefilterInvalidCIDR) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this delete prefilter invalid c Id r response has a 3xx status code -func (o *DeletePrefilterInvalidCIDR) IsRedirect() bool { - return false -} - -// IsClientError returns true when this delete prefilter invalid c Id r response has a 4xx status code -func (o *DeletePrefilterInvalidCIDR) IsClientError() bool { - return true -} - -// IsServerError returns true when this delete prefilter invalid c Id r response has a 5xx status code -func (o *DeletePrefilterInvalidCIDR) IsServerError() bool { - return false -} - -// IsCode returns true when this delete prefilter invalid c Id r response a status code equal to that given -func (o *DeletePrefilterInvalidCIDR) IsCode(code int) bool { - return code == 461 -} - -func (o *DeletePrefilterInvalidCIDR) Error() string { - return fmt.Sprintf("[DELETE /prefilter][%d] deletePrefilterInvalidCIdR %+v", 461, o.Payload) -} - -func (o *DeletePrefilterInvalidCIDR) String() string { - return fmt.Sprintf("[DELETE /prefilter][%d] deletePrefilterInvalidCIdR %+v", 461, o.Payload) -} - -func (o *DeletePrefilterInvalidCIDR) GetPayload() models.Error { - return o.Payload -} - -func (o *DeletePrefilterInvalidCIDR) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewDeletePrefilterFailure creates a DeletePrefilterFailure with default headers values -func NewDeletePrefilterFailure() *DeletePrefilterFailure { - return &DeletePrefilterFailure{} -} - -/* -DeletePrefilterFailure describes a response with status code 500, with default header values. 
- -Prefilter delete failed -*/ -type DeletePrefilterFailure struct { - Payload models.Error -} - -// IsSuccess returns true when this delete prefilter failure response has a 2xx status code -func (o *DeletePrefilterFailure) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this delete prefilter failure response has a 3xx status code -func (o *DeletePrefilterFailure) IsRedirect() bool { - return false -} - -// IsClientError returns true when this delete prefilter failure response has a 4xx status code -func (o *DeletePrefilterFailure) IsClientError() bool { - return false -} - -// IsServerError returns true when this delete prefilter failure response has a 5xx status code -func (o *DeletePrefilterFailure) IsServerError() bool { - return true -} - -// IsCode returns true when this delete prefilter failure response a status code equal to that given -func (o *DeletePrefilterFailure) IsCode(code int) bool { - return code == 500 -} - -func (o *DeletePrefilterFailure) Error() string { - return fmt.Sprintf("[DELETE /prefilter][%d] deletePrefilterFailure %+v", 500, o.Payload) -} - -func (o *DeletePrefilterFailure) String() string { - return fmt.Sprintf("[DELETE /prefilter][%d] deletePrefilterFailure %+v", 500, o.Payload) -} - -func (o *DeletePrefilterFailure) GetPayload() models.Error { - return o.Payload -} - -func (o *DeletePrefilterFailure) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/prefilter/get_prefilter_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/prefilter/get_prefilter_parameters.go deleted file mode 100644 index 40c5e7bfa..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/prefilter/get_prefilter_parameters.go +++ /dev/null @@ -1,131 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package prefilter - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" -) - -// NewGetPrefilterParams creates a new GetPrefilterParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewGetPrefilterParams() *GetPrefilterParams { - return &GetPrefilterParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewGetPrefilterParamsWithTimeout creates a new GetPrefilterParams object -// with the ability to set a timeout on a request. -func NewGetPrefilterParamsWithTimeout(timeout time.Duration) *GetPrefilterParams { - return &GetPrefilterParams{ - timeout: timeout, - } -} - -// NewGetPrefilterParamsWithContext creates a new GetPrefilterParams object -// with the ability to set a context for a request. 
-func NewGetPrefilterParamsWithContext(ctx context.Context) *GetPrefilterParams { - return &GetPrefilterParams{ - Context: ctx, - } -} - -// NewGetPrefilterParamsWithHTTPClient creates a new GetPrefilterParams object -// with the ability to set a custom HTTPClient for a request. -func NewGetPrefilterParamsWithHTTPClient(client *http.Client) *GetPrefilterParams { - return &GetPrefilterParams{ - HTTPClient: client, - } -} - -/* -GetPrefilterParams contains all the parameters to send to the API endpoint - - for the get prefilter operation. - - Typically these are written to a http.Request. -*/ -type GetPrefilterParams struct { - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the get prefilter params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetPrefilterParams) WithDefaults() *GetPrefilterParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the get prefilter params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetPrefilterParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the get prefilter params -func (o *GetPrefilterParams) WithTimeout(timeout time.Duration) *GetPrefilterParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the get prefilter params -func (o *GetPrefilterParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the get prefilter params -func (o *GetPrefilterParams) WithContext(ctx context.Context) *GetPrefilterParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the get prefilter params -func (o *GetPrefilterParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the get prefilter params -func (o *GetPrefilterParams) WithHTTPClient(client *http.Client) *GetPrefilterParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the get prefilter params -func (o *GetPrefilterParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WriteToRequest writes these params to a swagger request -func (o *GetPrefilterParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/prefilter/get_prefilter_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/prefilter/get_prefilter_responses.go deleted file mode 100644 index 63e051e3a..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/prefilter/get_prefilter_responses.go +++ /dev/null @@ -1,168 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package prefilter - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// GetPrefilterReader is a Reader for the GetPrefilter structure. 
-type GetPrefilterReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. -func (o *GetPrefilterReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewGetPrefilterOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 500: - result := NewGetPrefilterFailure() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewGetPrefilterOK creates a GetPrefilterOK with default headers values -func NewGetPrefilterOK() *GetPrefilterOK { - return &GetPrefilterOK{} -} - -/* -GetPrefilterOK describes a response with status code 200, with default header values. - -Success -*/ -type GetPrefilterOK struct { - Payload *models.Prefilter -} - -// IsSuccess returns true when this get prefilter o k response has a 2xx status code -func (o *GetPrefilterOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this get prefilter o k response has a 3xx status code -func (o *GetPrefilterOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get prefilter o k response has a 4xx status code -func (o *GetPrefilterOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this get prefilter o k response has a 5xx status code -func (o *GetPrefilterOK) IsServerError() bool { - return false -} - -// IsCode returns true when this get prefilter o k response a status code equal to that given -func (o *GetPrefilterOK) IsCode(code int) bool { - return code == 200 -} - -func (o *GetPrefilterOK) Error() string { - return fmt.Sprintf("[GET /prefilter][%d] getPrefilterOK %+v", 200, o.Payload) -} - -func (o *GetPrefilterOK) String() string { - return fmt.Sprintf("[GET /prefilter][%d] getPrefilterOK %+v", 200, o.Payload) -} - -func (o *GetPrefilterOK) GetPayload() *models.Prefilter { - return o.Payload -} - -func (o *GetPrefilterOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - o.Payload = new(models.Prefilter) - - // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewGetPrefilterFailure creates a GetPrefilterFailure with default headers values -func NewGetPrefilterFailure() *GetPrefilterFailure { - return &GetPrefilterFailure{} -} - -/* -GetPrefilterFailure describes a response with status code 500, with default header values. 
- -Prefilter get failed -*/ -type GetPrefilterFailure struct { - Payload models.Error -} - -// IsSuccess returns true when this get prefilter failure response has a 2xx status code -func (o *GetPrefilterFailure) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this get prefilter failure response has a 3xx status code -func (o *GetPrefilterFailure) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get prefilter failure response has a 4xx status code -func (o *GetPrefilterFailure) IsClientError() bool { - return false -} - -// IsServerError returns true when this get prefilter failure response has a 5xx status code -func (o *GetPrefilterFailure) IsServerError() bool { - return true -} - -// IsCode returns true when this get prefilter failure response a status code equal to that given -func (o *GetPrefilterFailure) IsCode(code int) bool { - return code == 500 -} - -func (o *GetPrefilterFailure) Error() string { - return fmt.Sprintf("[GET /prefilter][%d] getPrefilterFailure %+v", 500, o.Payload) -} - -func (o *GetPrefilterFailure) String() string { - return fmt.Sprintf("[GET /prefilter][%d] getPrefilterFailure %+v", 500, o.Payload) -} - -func (o *GetPrefilterFailure) GetPayload() models.Error { - return o.Payload -} - -func (o *GetPrefilterFailure) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/prefilter/patch_prefilter_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/prefilter/patch_prefilter_parameters.go deleted file mode 100644 index 14e6a56cf..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/prefilter/patch_prefilter_parameters.go +++ /dev/null @@ -1,156 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package prefilter - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// NewPatchPrefilterParams creates a new PatchPrefilterParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewPatchPrefilterParams() *PatchPrefilterParams { - return &PatchPrefilterParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewPatchPrefilterParamsWithTimeout creates a new PatchPrefilterParams object -// with the ability to set a timeout on a request. -func NewPatchPrefilterParamsWithTimeout(timeout time.Duration) *PatchPrefilterParams { - return &PatchPrefilterParams{ - timeout: timeout, - } -} - -// NewPatchPrefilterParamsWithContext creates a new PatchPrefilterParams object -// with the ability to set a context for a request. 
-func NewPatchPrefilterParamsWithContext(ctx context.Context) *PatchPrefilterParams { - return &PatchPrefilterParams{ - Context: ctx, - } -} - -// NewPatchPrefilterParamsWithHTTPClient creates a new PatchPrefilterParams object -// with the ability to set a custom HTTPClient for a request. -func NewPatchPrefilterParamsWithHTTPClient(client *http.Client) *PatchPrefilterParams { - return &PatchPrefilterParams{ - HTTPClient: client, - } -} - -/* -PatchPrefilterParams contains all the parameters to send to the API endpoint - - for the patch prefilter operation. - - Typically these are written to a http.Request. -*/ -type PatchPrefilterParams struct { - - /* PrefilterSpec. - - List of CIDR ranges for filter table - */ - PrefilterSpec *models.PrefilterSpec - - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the patch prefilter params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *PatchPrefilterParams) WithDefaults() *PatchPrefilterParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the patch prefilter params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *PatchPrefilterParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the patch prefilter params -func (o *PatchPrefilterParams) WithTimeout(timeout time.Duration) *PatchPrefilterParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the patch prefilter params -func (o *PatchPrefilterParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the patch prefilter params -func (o *PatchPrefilterParams) WithContext(ctx context.Context) *PatchPrefilterParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the patch prefilter params -func (o *PatchPrefilterParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the patch prefilter params -func (o *PatchPrefilterParams) WithHTTPClient(client *http.Client) *PatchPrefilterParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the patch prefilter params -func (o *PatchPrefilterParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WithPrefilterSpec adds the prefilterSpec to the patch prefilter params -func (o *PatchPrefilterParams) WithPrefilterSpec(prefilterSpec *models.PrefilterSpec) *PatchPrefilterParams { - o.SetPrefilterSpec(prefilterSpec) - return o -} - -// SetPrefilterSpec adds the prefilterSpec to the patch prefilter params -func (o *PatchPrefilterParams) SetPrefilterSpec(prefilterSpec *models.PrefilterSpec) { - o.PrefilterSpec = prefilterSpec -} - -// WriteToRequest writes these params to a swagger request -func (o *PatchPrefilterParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - if o.PrefilterSpec != nil { - if err := r.SetBodyParam(o.PrefilterSpec); err != nil { - return err - } - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
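
For reference, a minimal sketch (not taken from this diff) of how a caller assembles these parameters with the fluent setters shown above; the Deny field name on models.PrefilterSpec is an assumption, since that model file is not part of this diff:

// Sketch: assembling PatchPrefilterParams with the generated setters above.
// The Deny field on models.PrefilterSpec is an assumption, not shown here.
package main

import (
	"fmt"
	"time"

	"github.com/cilium/cilium/api/v1/client/prefilter"
	"github.com/cilium/cilium/api/v1/models"
)

func main() {
	// Request body carrying the CIDR list for the prefilter table.
	spec := &models.PrefilterSpec{Deny: []string{"10.0.0.0/8"}}

	// The With* setters mirror WithTimeout/WithContext/WithHTTPClient above.
	params := prefilter.NewPatchPrefilterParamsWithTimeout(5 * time.Second).
		WithPrefilterSpec(spec)

	fmt.Printf("patch request carries %d CIDR(s)\n", len(params.PrefilterSpec.Deny))
}
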
- } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/prefilter/patch_prefilter_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/prefilter/patch_prefilter_responses.go deleted file mode 100644 index 4767e9cc1..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/prefilter/patch_prefilter_responses.go +++ /dev/null @@ -1,235 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package prefilter - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// PatchPrefilterReader is a Reader for the PatchPrefilter structure. -type PatchPrefilterReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. -func (o *PatchPrefilterReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewPatchPrefilterOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 461: - result := NewPatchPrefilterInvalidCIDR() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 500: - result := NewPatchPrefilterFailure() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewPatchPrefilterOK creates a PatchPrefilterOK with default headers values -func NewPatchPrefilterOK() *PatchPrefilterOK { - return &PatchPrefilterOK{} -} - -/* -PatchPrefilterOK describes a response with status code 200, with default header values. 
- -Updated -*/ -type PatchPrefilterOK struct { - Payload *models.Prefilter -} - -// IsSuccess returns true when this patch prefilter o k response has a 2xx status code -func (o *PatchPrefilterOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this patch prefilter o k response has a 3xx status code -func (o *PatchPrefilterOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this patch prefilter o k response has a 4xx status code -func (o *PatchPrefilterOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this patch prefilter o k response has a 5xx status code -func (o *PatchPrefilterOK) IsServerError() bool { - return false -} - -// IsCode returns true when this patch prefilter o k response a status code equal to that given -func (o *PatchPrefilterOK) IsCode(code int) bool { - return code == 200 -} - -func (o *PatchPrefilterOK) Error() string { - return fmt.Sprintf("[PATCH /prefilter][%d] patchPrefilterOK %+v", 200, o.Payload) -} - -func (o *PatchPrefilterOK) String() string { - return fmt.Sprintf("[PATCH /prefilter][%d] patchPrefilterOK %+v", 200, o.Payload) -} - -func (o *PatchPrefilterOK) GetPayload() *models.Prefilter { - return o.Payload -} - -func (o *PatchPrefilterOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - o.Payload = new(models.Prefilter) - - // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewPatchPrefilterInvalidCIDR creates a PatchPrefilterInvalidCIDR with default headers values -func NewPatchPrefilterInvalidCIDR() *PatchPrefilterInvalidCIDR { - return &PatchPrefilterInvalidCIDR{} -} - -/* -PatchPrefilterInvalidCIDR describes a response with status code 461, with default header values. 
- -Invalid CIDR prefix -*/ -type PatchPrefilterInvalidCIDR struct { - Payload models.Error -} - -// IsSuccess returns true when this patch prefilter invalid c Id r response has a 2xx status code -func (o *PatchPrefilterInvalidCIDR) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this patch prefilter invalid c Id r response has a 3xx status code -func (o *PatchPrefilterInvalidCIDR) IsRedirect() bool { - return false -} - -// IsClientError returns true when this patch prefilter invalid c Id r response has a 4xx status code -func (o *PatchPrefilterInvalidCIDR) IsClientError() bool { - return true -} - -// IsServerError returns true when this patch prefilter invalid c Id r response has a 5xx status code -func (o *PatchPrefilterInvalidCIDR) IsServerError() bool { - return false -} - -// IsCode returns true when this patch prefilter invalid c Id r response a status code equal to that given -func (o *PatchPrefilterInvalidCIDR) IsCode(code int) bool { - return code == 461 -} - -func (o *PatchPrefilterInvalidCIDR) Error() string { - return fmt.Sprintf("[PATCH /prefilter][%d] patchPrefilterInvalidCIdR %+v", 461, o.Payload) -} - -func (o *PatchPrefilterInvalidCIDR) String() string { - return fmt.Sprintf("[PATCH /prefilter][%d] patchPrefilterInvalidCIdR %+v", 461, o.Payload) -} - -func (o *PatchPrefilterInvalidCIDR) GetPayload() models.Error { - return o.Payload -} - -func (o *PatchPrefilterInvalidCIDR) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewPatchPrefilterFailure creates a PatchPrefilterFailure with default headers values -func NewPatchPrefilterFailure() *PatchPrefilterFailure { - return &PatchPrefilterFailure{} -} - -/* -PatchPrefilterFailure describes a response with status code 500, with default header values. 
- -Prefilter update failed -*/ -type PatchPrefilterFailure struct { - Payload models.Error -} - -// IsSuccess returns true when this patch prefilter failure response has a 2xx status code -func (o *PatchPrefilterFailure) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this patch prefilter failure response has a 3xx status code -func (o *PatchPrefilterFailure) IsRedirect() bool { - return false -} - -// IsClientError returns true when this patch prefilter failure response has a 4xx status code -func (o *PatchPrefilterFailure) IsClientError() bool { - return false -} - -// IsServerError returns true when this patch prefilter failure response has a 5xx status code -func (o *PatchPrefilterFailure) IsServerError() bool { - return true -} - -// IsCode returns true when this patch prefilter failure response a status code equal to that given -func (o *PatchPrefilterFailure) IsCode(code int) bool { - return code == 500 -} - -func (o *PatchPrefilterFailure) Error() string { - return fmt.Sprintf("[PATCH /prefilter][%d] patchPrefilterFailure %+v", 500, o.Payload) -} - -func (o *PatchPrefilterFailure) String() string { - return fmt.Sprintf("[PATCH /prefilter][%d] patchPrefilterFailure %+v", 500, o.Payload) -} - -func (o *PatchPrefilterFailure) GetPayload() models.Error { - return o.Payload -} - -func (o *PatchPrefilterFailure) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/prefilter/prefilter_client.go b/vendor/github.com/cilium/cilium/api/v1/client/prefilter/prefilter_client.go deleted file mode 100644 index c577a739a..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/prefilter/prefilter_client.go +++ /dev/null @@ -1,162 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package prefilter - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" -) - -// New creates a new prefilter API client. 
-func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService { - return &Client{transport: transport, formats: formats} -} - -/* -Client for prefilter API -*/ -type Client struct { - transport runtime.ClientTransport - formats strfmt.Registry -} - -// ClientOption is the option for Client methods -type ClientOption func(*runtime.ClientOperation) - -// ClientService is the interface for Client methods -type ClientService interface { - DeletePrefilter(params *DeletePrefilterParams, opts ...ClientOption) (*DeletePrefilterOK, error) - - GetPrefilter(params *GetPrefilterParams, opts ...ClientOption) (*GetPrefilterOK, error) - - PatchPrefilter(params *PatchPrefilterParams, opts ...ClientOption) (*PatchPrefilterOK, error) - - SetTransport(transport runtime.ClientTransport) -} - -/* -DeletePrefilter deletes list of c ID rs -*/ -func (a *Client) DeletePrefilter(params *DeletePrefilterParams, opts ...ClientOption) (*DeletePrefilterOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewDeletePrefilterParams() - } - op := &runtime.ClientOperation{ - ID: "DeletePrefilter", - Method: "DELETE", - PathPattern: "/prefilter", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &DeletePrefilterReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*DeletePrefilterOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for DeletePrefilter: API contract not enforced by server. Client expected to get an error, but got: %T", result) - panic(msg) -} - -/* -GetPrefilter retrieves list of c ID rs -*/ -func (a *Client) GetPrefilter(params *GetPrefilterParams, opts ...ClientOption) (*GetPrefilterOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewGetPrefilterParams() - } - op := &runtime.ClientOperation{ - ID: "GetPrefilter", - Method: "GET", - PathPattern: "/prefilter", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &GetPrefilterReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*GetPrefilterOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for GetPrefilter: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) - panic(msg) -} - -/* -PatchPrefilter updates list of c ID rs -*/ -func (a *Client) PatchPrefilter(params *PatchPrefilterParams, opts ...ClientOption) (*PatchPrefilterOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewPatchPrefilterParams() - } - op := &runtime.ClientOperation{ - ID: "PatchPrefilter", - Method: "PATCH", - PathPattern: "/prefilter", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &PatchPrefilterReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*PatchPrefilterOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for PatchPrefilter: API contract not enforced by server. Client expected to get an error, but got: %T", result) - panic(msg) -} - -// SetTransport changes the transport on the client -func (a *Client) SetTransport(transport runtime.ClientTransport) { - a.transport = transport -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/recorder/delete_recorder_id_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/recorder/delete_recorder_id_parameters.go deleted file mode 100644 index 6a9a3f362..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/recorder/delete_recorder_id_parameters.go +++ /dev/null @@ -1,155 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package recorder - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" -) - -// NewDeleteRecorderIDParams creates a new DeleteRecorderIDParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewDeleteRecorderIDParams() *DeleteRecorderIDParams { - return &DeleteRecorderIDParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewDeleteRecorderIDParamsWithTimeout creates a new DeleteRecorderIDParams object -// with the ability to set a timeout on a request. -func NewDeleteRecorderIDParamsWithTimeout(timeout time.Duration) *DeleteRecorderIDParams { - return &DeleteRecorderIDParams{ - timeout: timeout, - } -} - -// NewDeleteRecorderIDParamsWithContext creates a new DeleteRecorderIDParams object -// with the ability to set a context for a request. -func NewDeleteRecorderIDParamsWithContext(ctx context.Context) *DeleteRecorderIDParams { - return &DeleteRecorderIDParams{ - Context: ctx, - } -} - -// NewDeleteRecorderIDParamsWithHTTPClient creates a new DeleteRecorderIDParams object -// with the ability to set a custom HTTPClient for a request. 
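
The deleted prefilter_client.go above defines the ClientService interface that callers program against. A minimal usage sketch follows; the host, base path, and scheme are placeholders rather than values from this diff, and the Cilium agent normally serves this API over a unix socket behind its own client wrapper:

// Sketch: constructing the generated prefilter client and issuing a GET.
package main

import (
	"fmt"
	"time"

	"github.com/cilium/cilium/api/v1/client/prefilter"
	httptransport "github.com/go-openapi/runtime/client"
	"github.com/go-openapi/strfmt"
)

func main() {
	// Placeholder endpoint for illustration only.
	transport := httptransport.New("localhost:9234", "/v1", []string{"http"})
	c := prefilter.New(transport, strfmt.Default)

	ok, err := c.GetPrefilter(prefilter.NewGetPrefilterParamsWithTimeout(5 * time.Second))
	if err != nil {
		// Non-2xx responses (e.g. GetPrefilterFailure) come back as typed errors.
		fmt.Println("get prefilter failed:", err)
		return
	}
	fmt.Printf("prefilter: %+v\n", ok.GetPayload())
}
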
-func NewDeleteRecorderIDParamsWithHTTPClient(client *http.Client) *DeleteRecorderIDParams { - return &DeleteRecorderIDParams{ - HTTPClient: client, - } -} - -/* -DeleteRecorderIDParams contains all the parameters to send to the API endpoint - - for the delete recorder ID operation. - - Typically these are written to a http.Request. -*/ -type DeleteRecorderIDParams struct { - - /* ID. - - ID of recorder - */ - ID int64 - - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the delete recorder ID params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *DeleteRecorderIDParams) WithDefaults() *DeleteRecorderIDParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the delete recorder ID params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *DeleteRecorderIDParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the delete recorder ID params -func (o *DeleteRecorderIDParams) WithTimeout(timeout time.Duration) *DeleteRecorderIDParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the delete recorder ID params -func (o *DeleteRecorderIDParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the delete recorder ID params -func (o *DeleteRecorderIDParams) WithContext(ctx context.Context) *DeleteRecorderIDParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the delete recorder ID params -func (o *DeleteRecorderIDParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the delete recorder ID params -func (o *DeleteRecorderIDParams) WithHTTPClient(client *http.Client) *DeleteRecorderIDParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the delete recorder ID params -func (o *DeleteRecorderIDParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WithID adds the id to the delete recorder ID params -func (o *DeleteRecorderIDParams) WithID(id int64) *DeleteRecorderIDParams { - o.SetID(id) - return o -} - -// SetID adds the id to the delete recorder ID params -func (o *DeleteRecorderIDParams) SetID(id int64) { - o.ID = id -} - -// WriteToRequest writes these params to a swagger request -func (o *DeleteRecorderIDParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - - // path param id - if err := r.SetPathParam("id", swag.FormatInt64(o.ID)); err != nil { - return err - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/recorder/delete_recorder_id_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/recorder/delete_recorder_id_responses.go deleted file mode 100644 index b74c2ee2f..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/recorder/delete_recorder_id_responses.go +++ /dev/null @@ -1,213 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package recorder - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// DeleteRecorderIDReader is a Reader for the DeleteRecorderID structure. -type DeleteRecorderIDReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. -func (o *DeleteRecorderIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewDeleteRecorderIDOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 404: - result := NewDeleteRecorderIDNotFound() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 500: - result := NewDeleteRecorderIDFailure() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewDeleteRecorderIDOK creates a DeleteRecorderIDOK with default headers values -func NewDeleteRecorderIDOK() *DeleteRecorderIDOK { - return &DeleteRecorderIDOK{} -} - -/* -DeleteRecorderIDOK describes a response with status code 200, with default header values. - -Success -*/ -type DeleteRecorderIDOK struct { -} - -// IsSuccess returns true when this delete recorder Id o k response has a 2xx status code -func (o *DeleteRecorderIDOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this delete recorder Id o k response has a 3xx status code -func (o *DeleteRecorderIDOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this delete recorder Id o k response has a 4xx status code -func (o *DeleteRecorderIDOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this delete recorder Id o k response has a 5xx status code -func (o *DeleteRecorderIDOK) IsServerError() bool { - return false -} - -// IsCode returns true when this delete recorder Id o k response a status code equal to that given -func (o *DeleteRecorderIDOK) IsCode(code int) bool { - return code == 200 -} - -func (o *DeleteRecorderIDOK) Error() string { - return fmt.Sprintf("[DELETE /recorder/{id}][%d] deleteRecorderIdOK ", 200) -} - -func (o *DeleteRecorderIDOK) String() string { - return fmt.Sprintf("[DELETE /recorder/{id}][%d] deleteRecorderIdOK ", 200) -} - -func (o *DeleteRecorderIDOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} - -// NewDeleteRecorderIDNotFound creates a DeleteRecorderIDNotFound with default headers values -func NewDeleteRecorderIDNotFound() *DeleteRecorderIDNotFound { - return &DeleteRecorderIDNotFound{} -} - -/* -DeleteRecorderIDNotFound describes a response with status code 404, with default header values. 
- -Recorder not found -*/ -type DeleteRecorderIDNotFound struct { -} - -// IsSuccess returns true when this delete recorder Id not found response has a 2xx status code -func (o *DeleteRecorderIDNotFound) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this delete recorder Id not found response has a 3xx status code -func (o *DeleteRecorderIDNotFound) IsRedirect() bool { - return false -} - -// IsClientError returns true when this delete recorder Id not found response has a 4xx status code -func (o *DeleteRecorderIDNotFound) IsClientError() bool { - return true -} - -// IsServerError returns true when this delete recorder Id not found response has a 5xx status code -func (o *DeleteRecorderIDNotFound) IsServerError() bool { - return false -} - -// IsCode returns true when this delete recorder Id not found response a status code equal to that given -func (o *DeleteRecorderIDNotFound) IsCode(code int) bool { - return code == 404 -} - -func (o *DeleteRecorderIDNotFound) Error() string { - return fmt.Sprintf("[DELETE /recorder/{id}][%d] deleteRecorderIdNotFound ", 404) -} - -func (o *DeleteRecorderIDNotFound) String() string { - return fmt.Sprintf("[DELETE /recorder/{id}][%d] deleteRecorderIdNotFound ", 404) -} - -func (o *DeleteRecorderIDNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} - -// NewDeleteRecorderIDFailure creates a DeleteRecorderIDFailure with default headers values -func NewDeleteRecorderIDFailure() *DeleteRecorderIDFailure { - return &DeleteRecorderIDFailure{} -} - -/* -DeleteRecorderIDFailure describes a response with status code 500, with default header values. - -Recorder deletion failed -*/ -type DeleteRecorderIDFailure struct { - Payload models.Error -} - -// IsSuccess returns true when this delete recorder Id failure response has a 2xx status code -func (o *DeleteRecorderIDFailure) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this delete recorder Id failure response has a 3xx status code -func (o *DeleteRecorderIDFailure) IsRedirect() bool { - return false -} - -// IsClientError returns true when this delete recorder Id failure response has a 4xx status code -func (o *DeleteRecorderIDFailure) IsClientError() bool { - return false -} - -// IsServerError returns true when this delete recorder Id failure response has a 5xx status code -func (o *DeleteRecorderIDFailure) IsServerError() bool { - return true -} - -// IsCode returns true when this delete recorder Id failure response a status code equal to that given -func (o *DeleteRecorderIDFailure) IsCode(code int) bool { - return code == 500 -} - -func (o *DeleteRecorderIDFailure) Error() string { - return fmt.Sprintf("[DELETE /recorder/{id}][%d] deleteRecorderIdFailure %+v", 500, o.Payload) -} - -func (o *DeleteRecorderIDFailure) String() string { - return fmt.Sprintf("[DELETE /recorder/{id}][%d] deleteRecorderIdFailure %+v", 500, o.Payload) -} - -func (o *DeleteRecorderIDFailure) GetPayload() models.Error { - return o.Payload -} - -func (o *DeleteRecorderIDFailure) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_id_parameters.go 
b/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_id_parameters.go deleted file mode 100644 index 4aae52c7d..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_id_parameters.go +++ /dev/null @@ -1,155 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package recorder - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" -) - -// NewGetRecorderIDParams creates a new GetRecorderIDParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewGetRecorderIDParams() *GetRecorderIDParams { - return &GetRecorderIDParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewGetRecorderIDParamsWithTimeout creates a new GetRecorderIDParams object -// with the ability to set a timeout on a request. -func NewGetRecorderIDParamsWithTimeout(timeout time.Duration) *GetRecorderIDParams { - return &GetRecorderIDParams{ - timeout: timeout, - } -} - -// NewGetRecorderIDParamsWithContext creates a new GetRecorderIDParams object -// with the ability to set a context for a request. -func NewGetRecorderIDParamsWithContext(ctx context.Context) *GetRecorderIDParams { - return &GetRecorderIDParams{ - Context: ctx, - } -} - -// NewGetRecorderIDParamsWithHTTPClient creates a new GetRecorderIDParams object -// with the ability to set a custom HTTPClient for a request. -func NewGetRecorderIDParamsWithHTTPClient(client *http.Client) *GetRecorderIDParams { - return &GetRecorderIDParams{ - HTTPClient: client, - } -} - -/* -GetRecorderIDParams contains all the parameters to send to the API endpoint - - for the get recorder ID operation. - - Typically these are written to a http.Request. -*/ -type GetRecorderIDParams struct { - - /* ID. - - ID of recorder - */ - ID int64 - - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the get recorder ID params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetRecorderIDParams) WithDefaults() *GetRecorderIDParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the get recorder ID params (not the query body). -// -// All values with no default are reset to their zero value. 
-func (o *GetRecorderIDParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the get recorder ID params -func (o *GetRecorderIDParams) WithTimeout(timeout time.Duration) *GetRecorderIDParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the get recorder ID params -func (o *GetRecorderIDParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the get recorder ID params -func (o *GetRecorderIDParams) WithContext(ctx context.Context) *GetRecorderIDParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the get recorder ID params -func (o *GetRecorderIDParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the get recorder ID params -func (o *GetRecorderIDParams) WithHTTPClient(client *http.Client) *GetRecorderIDParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the get recorder ID params -func (o *GetRecorderIDParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WithID adds the id to the get recorder ID params -func (o *GetRecorderIDParams) WithID(id int64) *GetRecorderIDParams { - o.SetID(id) - return o -} - -// SetID adds the id to the get recorder ID params -func (o *GetRecorderIDParams) SetID(id int64) { - o.ID = id -} - -// WriteToRequest writes these params to a swagger request -func (o *GetRecorderIDParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - - // path param id - if err := r.SetPathParam("id", swag.FormatInt64(o.ID)); err != nil { - return err - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_id_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_id_responses.go deleted file mode 100644 index 07f4e60cd..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_id_responses.go +++ /dev/null @@ -1,158 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package recorder - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// GetRecorderIDReader is a Reader for the GetRecorderID structure. -type GetRecorderIDReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. 
-func (o *GetRecorderIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewGetRecorderIDOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 404: - result := NewGetRecorderIDNotFound() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewGetRecorderIDOK creates a GetRecorderIDOK with default headers values -func NewGetRecorderIDOK() *GetRecorderIDOK { - return &GetRecorderIDOK{} -} - -/* -GetRecorderIDOK describes a response with status code 200, with default header values. - -Success -*/ -type GetRecorderIDOK struct { - Payload *models.Recorder -} - -// IsSuccess returns true when this get recorder Id o k response has a 2xx status code -func (o *GetRecorderIDOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this get recorder Id o k response has a 3xx status code -func (o *GetRecorderIDOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get recorder Id o k response has a 4xx status code -func (o *GetRecorderIDOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this get recorder Id o k response has a 5xx status code -func (o *GetRecorderIDOK) IsServerError() bool { - return false -} - -// IsCode returns true when this get recorder Id o k response a status code equal to that given -func (o *GetRecorderIDOK) IsCode(code int) bool { - return code == 200 -} - -func (o *GetRecorderIDOK) Error() string { - return fmt.Sprintf("[GET /recorder/{id}][%d] getRecorderIdOK %+v", 200, o.Payload) -} - -func (o *GetRecorderIDOK) String() string { - return fmt.Sprintf("[GET /recorder/{id}][%d] getRecorderIdOK %+v", 200, o.Payload) -} - -func (o *GetRecorderIDOK) GetPayload() *models.Recorder { - return o.Payload -} - -func (o *GetRecorderIDOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - o.Payload = new(models.Recorder) - - // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewGetRecorderIDNotFound creates a GetRecorderIDNotFound with default headers values -func NewGetRecorderIDNotFound() *GetRecorderIDNotFound { - return &GetRecorderIDNotFound{} -} - -/* -GetRecorderIDNotFound describes a response with status code 404, with default header values. 
- -Recorder not found -*/ -type GetRecorderIDNotFound struct { -} - -// IsSuccess returns true when this get recorder Id not found response has a 2xx status code -func (o *GetRecorderIDNotFound) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this get recorder Id not found response has a 3xx status code -func (o *GetRecorderIDNotFound) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get recorder Id not found response has a 4xx status code -func (o *GetRecorderIDNotFound) IsClientError() bool { - return true -} - -// IsServerError returns true when this get recorder Id not found response has a 5xx status code -func (o *GetRecorderIDNotFound) IsServerError() bool { - return false -} - -// IsCode returns true when this get recorder Id not found response a status code equal to that given -func (o *GetRecorderIDNotFound) IsCode(code int) bool { - return code == 404 -} - -func (o *GetRecorderIDNotFound) Error() string { - return fmt.Sprintf("[GET /recorder/{id}][%d] getRecorderIdNotFound ", 404) -} - -func (o *GetRecorderIDNotFound) String() string { - return fmt.Sprintf("[GET /recorder/{id}][%d] getRecorderIdNotFound ", 404) -} - -func (o *GetRecorderIDNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_masks_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_masks_parameters.go deleted file mode 100644 index 4dd487344..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_masks_parameters.go +++ /dev/null @@ -1,131 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package recorder - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" -) - -// NewGetRecorderMasksParams creates a new GetRecorderMasksParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewGetRecorderMasksParams() *GetRecorderMasksParams { - return &GetRecorderMasksParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewGetRecorderMasksParamsWithTimeout creates a new GetRecorderMasksParams object -// with the ability to set a timeout on a request. -func NewGetRecorderMasksParamsWithTimeout(timeout time.Duration) *GetRecorderMasksParams { - return &GetRecorderMasksParams{ - timeout: timeout, - } -} - -// NewGetRecorderMasksParamsWithContext creates a new GetRecorderMasksParams object -// with the ability to set a context for a request. -func NewGetRecorderMasksParamsWithContext(ctx context.Context) *GetRecorderMasksParams { - return &GetRecorderMasksParams{ - Context: ctx, - } -} - -// NewGetRecorderMasksParamsWithHTTPClient creates a new GetRecorderMasksParams object -// with the ability to set a custom HTTPClient for a request. 
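
The typed 200/404 responses above are how go-swagger distinguishes outcomes: the reader returns non-success results as errors that callers match by type. A sketch, assuming the generated recorder.ClientService interface and its GetRecorderID method follow the usual go-swagger layout (recorder_client.go itself is not shown in this diff):

// Sketch: telling a 404 apart from other failures via the typed responses above.
package example

import (
	"errors"
	"fmt"
	"time"

	"github.com/cilium/cilium/api/v1/client/recorder"
)

func lookupRecorder(c recorder.ClientService, id int64) error {
	params := recorder.NewGetRecorderIDParamsWithTimeout(5 * time.Second).WithID(id)
	res, err := c.GetRecorderID(params)
	if err != nil {
		// The 404 reader returns *GetRecorderIDNotFound as the error value.
		var notFound *recorder.GetRecorderIDNotFound
		if errors.As(err, &notFound) {
			fmt.Printf("recorder %d does not exist\n", id)
			return nil
		}
		return err
	}
	fmt.Printf("recorder %d: %+v\n", id, res.GetPayload())
	return nil
}
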
-func NewGetRecorderMasksParamsWithHTTPClient(client *http.Client) *GetRecorderMasksParams { - return &GetRecorderMasksParams{ - HTTPClient: client, - } -} - -/* -GetRecorderMasksParams contains all the parameters to send to the API endpoint - - for the get recorder masks operation. - - Typically these are written to a http.Request. -*/ -type GetRecorderMasksParams struct { - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the get recorder masks params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetRecorderMasksParams) WithDefaults() *GetRecorderMasksParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the get recorder masks params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetRecorderMasksParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the get recorder masks params -func (o *GetRecorderMasksParams) WithTimeout(timeout time.Duration) *GetRecorderMasksParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the get recorder masks params -func (o *GetRecorderMasksParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the get recorder masks params -func (o *GetRecorderMasksParams) WithContext(ctx context.Context) *GetRecorderMasksParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the get recorder masks params -func (o *GetRecorderMasksParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the get recorder masks params -func (o *GetRecorderMasksParams) WithHTTPClient(client *http.Client) *GetRecorderMasksParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the get recorder masks params -func (o *GetRecorderMasksParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WriteToRequest writes these params to a swagger request -func (o *GetRecorderMasksParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_masks_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_masks_responses.go deleted file mode 100644 index 06b855596..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_masks_responses.go +++ /dev/null @@ -1,99 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package recorder - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// GetRecorderMasksReader is a Reader for the GetRecorderMasks structure. -type GetRecorderMasksReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. 
-func (o *GetRecorderMasksReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewGetRecorderMasksOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewGetRecorderMasksOK creates a GetRecorderMasksOK with default headers values -func NewGetRecorderMasksOK() *GetRecorderMasksOK { - return &GetRecorderMasksOK{} -} - -/* -GetRecorderMasksOK describes a response with status code 200, with default header values. - -Success -*/ -type GetRecorderMasksOK struct { - Payload []*models.RecorderMask -} - -// IsSuccess returns true when this get recorder masks o k response has a 2xx status code -func (o *GetRecorderMasksOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this get recorder masks o k response has a 3xx status code -func (o *GetRecorderMasksOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get recorder masks o k response has a 4xx status code -func (o *GetRecorderMasksOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this get recorder masks o k response has a 5xx status code -func (o *GetRecorderMasksOK) IsServerError() bool { - return false -} - -// IsCode returns true when this get recorder masks o k response a status code equal to that given -func (o *GetRecorderMasksOK) IsCode(code int) bool { - return code == 200 -} - -func (o *GetRecorderMasksOK) Error() string { - return fmt.Sprintf("[GET /recorder/masks][%d] getRecorderMasksOK %+v", 200, o.Payload) -} - -func (o *GetRecorderMasksOK) String() string { - return fmt.Sprintf("[GET /recorder/masks][%d] getRecorderMasksOK %+v", 200, o.Payload) -} - -func (o *GetRecorderMasksOK) GetPayload() []*models.RecorderMask { - return o.Payload -} - -func (o *GetRecorderMasksOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_parameters.go deleted file mode 100644 index d7ff113b2..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_parameters.go +++ /dev/null @@ -1,131 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package recorder - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" -) - -// NewGetRecorderParams creates a new GetRecorderParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. 
-func NewGetRecorderParams() *GetRecorderParams { - return &GetRecorderParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewGetRecorderParamsWithTimeout creates a new GetRecorderParams object -// with the ability to set a timeout on a request. -func NewGetRecorderParamsWithTimeout(timeout time.Duration) *GetRecorderParams { - return &GetRecorderParams{ - timeout: timeout, - } -} - -// NewGetRecorderParamsWithContext creates a new GetRecorderParams object -// with the ability to set a context for a request. -func NewGetRecorderParamsWithContext(ctx context.Context) *GetRecorderParams { - return &GetRecorderParams{ - Context: ctx, - } -} - -// NewGetRecorderParamsWithHTTPClient creates a new GetRecorderParams object -// with the ability to set a custom HTTPClient for a request. -func NewGetRecorderParamsWithHTTPClient(client *http.Client) *GetRecorderParams { - return &GetRecorderParams{ - HTTPClient: client, - } -} - -/* -GetRecorderParams contains all the parameters to send to the API endpoint - - for the get recorder operation. - - Typically these are written to a http.Request. -*/ -type GetRecorderParams struct { - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the get recorder params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetRecorderParams) WithDefaults() *GetRecorderParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the get recorder params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetRecorderParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the get recorder params -func (o *GetRecorderParams) WithTimeout(timeout time.Duration) *GetRecorderParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the get recorder params -func (o *GetRecorderParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the get recorder params -func (o *GetRecorderParams) WithContext(ctx context.Context) *GetRecorderParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the get recorder params -func (o *GetRecorderParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the get recorder params -func (o *GetRecorderParams) WithHTTPClient(client *http.Client) *GetRecorderParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the get recorder params -func (o *GetRecorderParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WriteToRequest writes these params to a swagger request -func (o *GetRecorderParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_responses.go deleted file mode 100644 index f45762c30..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/recorder/get_recorder_responses.go +++ /dev/null @@ -1,99 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. 
- -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package recorder - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// GetRecorderReader is a Reader for the GetRecorder structure. -type GetRecorderReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. -func (o *GetRecorderReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewGetRecorderOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewGetRecorderOK creates a GetRecorderOK with default headers values -func NewGetRecorderOK() *GetRecorderOK { - return &GetRecorderOK{} -} - -/* -GetRecorderOK describes a response with status code 200, with default header values. - -Success -*/ -type GetRecorderOK struct { - Payload []*models.Recorder -} - -// IsSuccess returns true when this get recorder o k response has a 2xx status code -func (o *GetRecorderOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this get recorder o k response has a 3xx status code -func (o *GetRecorderOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get recorder o k response has a 4xx status code -func (o *GetRecorderOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this get recorder o k response has a 5xx status code -func (o *GetRecorderOK) IsServerError() bool { - return false -} - -// IsCode returns true when this get recorder o k response a status code equal to that given -func (o *GetRecorderOK) IsCode(code int) bool { - return code == 200 -} - -func (o *GetRecorderOK) Error() string { - return fmt.Sprintf("[GET /recorder][%d] getRecorderOK %+v", 200, o.Payload) -} - -func (o *GetRecorderOK) String() string { - return fmt.Sprintf("[GET /recorder][%d] getRecorderOK %+v", 200, o.Payload) -} - -func (o *GetRecorderOK) GetPayload() []*models.Recorder { - return o.Payload -} - -func (o *GetRecorderOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/recorder/put_recorder_id_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/recorder/put_recorder_id_parameters.go deleted file mode 100644 index 3515d3c76..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/recorder/put_recorder_id_parameters.go +++ /dev/null @@ -1,179 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package recorder - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - - "github.com/cilium/cilium/api/v1/models" -) - -// NewPutRecorderIDParams creates a new PutRecorderIDParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewPutRecorderIDParams() *PutRecorderIDParams { - return &PutRecorderIDParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewPutRecorderIDParamsWithTimeout creates a new PutRecorderIDParams object -// with the ability to set a timeout on a request. -func NewPutRecorderIDParamsWithTimeout(timeout time.Duration) *PutRecorderIDParams { - return &PutRecorderIDParams{ - timeout: timeout, - } -} - -// NewPutRecorderIDParamsWithContext creates a new PutRecorderIDParams object -// with the ability to set a context for a request. -func NewPutRecorderIDParamsWithContext(ctx context.Context) *PutRecorderIDParams { - return &PutRecorderIDParams{ - Context: ctx, - } -} - -// NewPutRecorderIDParamsWithHTTPClient creates a new PutRecorderIDParams object -// with the ability to set a custom HTTPClient for a request. -func NewPutRecorderIDParamsWithHTTPClient(client *http.Client) *PutRecorderIDParams { - return &PutRecorderIDParams{ - HTTPClient: client, - } -} - -/* -PutRecorderIDParams contains all the parameters to send to the API endpoint - - for the put recorder ID operation. - - Typically these are written to a http.Request. -*/ -type PutRecorderIDParams struct { - - /* Config. - - Recorder configuration - */ - Config *models.RecorderSpec - - /* ID. - - ID of recorder - */ - ID int64 - - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the put recorder ID params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *PutRecorderIDParams) WithDefaults() *PutRecorderIDParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the put recorder ID params (not the query body). -// -// All values with no default are reset to their zero value. 
-func (o *PutRecorderIDParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the put recorder ID params -func (o *PutRecorderIDParams) WithTimeout(timeout time.Duration) *PutRecorderIDParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the put recorder ID params -func (o *PutRecorderIDParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the put recorder ID params -func (o *PutRecorderIDParams) WithContext(ctx context.Context) *PutRecorderIDParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the put recorder ID params -func (o *PutRecorderIDParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the put recorder ID params -func (o *PutRecorderIDParams) WithHTTPClient(client *http.Client) *PutRecorderIDParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the put recorder ID params -func (o *PutRecorderIDParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WithConfig adds the config to the put recorder ID params -func (o *PutRecorderIDParams) WithConfig(config *models.RecorderSpec) *PutRecorderIDParams { - o.SetConfig(config) - return o -} - -// SetConfig adds the config to the put recorder ID params -func (o *PutRecorderIDParams) SetConfig(config *models.RecorderSpec) { - o.Config = config -} - -// WithID adds the id to the put recorder ID params -func (o *PutRecorderIDParams) WithID(id int64) *PutRecorderIDParams { - o.SetID(id) - return o -} - -// SetID adds the id to the put recorder ID params -func (o *PutRecorderIDParams) SetID(id int64) { - o.ID = id -} - -// WriteToRequest writes these params to a swagger request -func (o *PutRecorderIDParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - if o.Config != nil { - if err := r.SetBodyParam(o.Config); err != nil { - return err - } - } - - // path param id - if err := r.SetPathParam("id", swag.FormatInt64(o.ID)); err != nil { - return err - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/recorder/put_recorder_id_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/recorder/put_recorder_id_responses.go deleted file mode 100644 index 5e1c4272e..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/recorder/put_recorder_id_responses.go +++ /dev/null @@ -1,213 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package recorder - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// PutRecorderIDReader is a Reader for the PutRecorderID structure. -type PutRecorderIDReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. 
-func (o *PutRecorderIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewPutRecorderIDOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 201: - result := NewPutRecorderIDCreated() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 500: - result := NewPutRecorderIDFailure() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewPutRecorderIDOK creates a PutRecorderIDOK with default headers values -func NewPutRecorderIDOK() *PutRecorderIDOK { - return &PutRecorderIDOK{} -} - -/* -PutRecorderIDOK describes a response with status code 200, with default header values. - -Updated -*/ -type PutRecorderIDOK struct { -} - -// IsSuccess returns true when this put recorder Id o k response has a 2xx status code -func (o *PutRecorderIDOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this put recorder Id o k response has a 3xx status code -func (o *PutRecorderIDOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this put recorder Id o k response has a 4xx status code -func (o *PutRecorderIDOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this put recorder Id o k response has a 5xx status code -func (o *PutRecorderIDOK) IsServerError() bool { - return false -} - -// IsCode returns true when this put recorder Id o k response a status code equal to that given -func (o *PutRecorderIDOK) IsCode(code int) bool { - return code == 200 -} - -func (o *PutRecorderIDOK) Error() string { - return fmt.Sprintf("[PUT /recorder/{id}][%d] putRecorderIdOK ", 200) -} - -func (o *PutRecorderIDOK) String() string { - return fmt.Sprintf("[PUT /recorder/{id}][%d] putRecorderIdOK ", 200) -} - -func (o *PutRecorderIDOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} - -// NewPutRecorderIDCreated creates a PutRecorderIDCreated with default headers values -func NewPutRecorderIDCreated() *PutRecorderIDCreated { - return &PutRecorderIDCreated{} -} - -/* -PutRecorderIDCreated describes a response with status code 201, with default header values. 
- -Created -*/ -type PutRecorderIDCreated struct { -} - -// IsSuccess returns true when this put recorder Id created response has a 2xx status code -func (o *PutRecorderIDCreated) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this put recorder Id created response has a 3xx status code -func (o *PutRecorderIDCreated) IsRedirect() bool { - return false -} - -// IsClientError returns true when this put recorder Id created response has a 4xx status code -func (o *PutRecorderIDCreated) IsClientError() bool { - return false -} - -// IsServerError returns true when this put recorder Id created response has a 5xx status code -func (o *PutRecorderIDCreated) IsServerError() bool { - return false -} - -// IsCode returns true when this put recorder Id created response a status code equal to that given -func (o *PutRecorderIDCreated) IsCode(code int) bool { - return code == 201 -} - -func (o *PutRecorderIDCreated) Error() string { - return fmt.Sprintf("[PUT /recorder/{id}][%d] putRecorderIdCreated ", 201) -} - -func (o *PutRecorderIDCreated) String() string { - return fmt.Sprintf("[PUT /recorder/{id}][%d] putRecorderIdCreated ", 201) -} - -func (o *PutRecorderIDCreated) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} - -// NewPutRecorderIDFailure creates a PutRecorderIDFailure with default headers values -func NewPutRecorderIDFailure() *PutRecorderIDFailure { - return &PutRecorderIDFailure{} -} - -/* -PutRecorderIDFailure describes a response with status code 500, with default header values. - -Error while creating recorder -*/ -type PutRecorderIDFailure struct { - Payload models.Error -} - -// IsSuccess returns true when this put recorder Id failure response has a 2xx status code -func (o *PutRecorderIDFailure) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this put recorder Id failure response has a 3xx status code -func (o *PutRecorderIDFailure) IsRedirect() bool { - return false -} - -// IsClientError returns true when this put recorder Id failure response has a 4xx status code -func (o *PutRecorderIDFailure) IsClientError() bool { - return false -} - -// IsServerError returns true when this put recorder Id failure response has a 5xx status code -func (o *PutRecorderIDFailure) IsServerError() bool { - return true -} - -// IsCode returns true when this put recorder Id failure response a status code equal to that given -func (o *PutRecorderIDFailure) IsCode(code int) bool { - return code == 500 -} - -func (o *PutRecorderIDFailure) Error() string { - return fmt.Sprintf("[PUT /recorder/{id}][%d] putRecorderIdFailure %+v", 500, o.Payload) -} - -func (o *PutRecorderIDFailure) String() string { - return fmt.Sprintf("[PUT /recorder/{id}][%d] putRecorderIdFailure %+v", 500, o.Payload) -} - -func (o *PutRecorderIDFailure) GetPayload() models.Error { - return o.Payload -} - -func (o *PutRecorderIDFailure) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/recorder/recorder_client.go b/vendor/github.com/cilium/cilium/api/v1/client/recorder/recorder_client.go deleted file mode 100644 index 883e304be..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/recorder/recorder_client.go +++ /dev/null @@ -1,243 +0,0 @@ -// Code 
generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package recorder - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" -) - -// New creates a new recorder API client. -func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService { - return &Client{transport: transport, formats: formats} -} - -/* -Client for recorder API -*/ -type Client struct { - transport runtime.ClientTransport - formats strfmt.Registry -} - -// ClientOption is the option for Client methods -type ClientOption func(*runtime.ClientOperation) - -// ClientService is the interface for Client methods -type ClientService interface { - DeleteRecorderID(params *DeleteRecorderIDParams, opts ...ClientOption) (*DeleteRecorderIDOK, error) - - GetRecorder(params *GetRecorderParams, opts ...ClientOption) (*GetRecorderOK, error) - - GetRecorderID(params *GetRecorderIDParams, opts ...ClientOption) (*GetRecorderIDOK, error) - - GetRecorderMasks(params *GetRecorderMasksParams, opts ...ClientOption) (*GetRecorderMasksOK, error) - - PutRecorderID(params *PutRecorderIDParams, opts ...ClientOption) (*PutRecorderIDOK, *PutRecorderIDCreated, error) - - SetTransport(transport runtime.ClientTransport) -} - -/* -DeleteRecorderID deletes a recorder -*/ -func (a *Client) DeleteRecorderID(params *DeleteRecorderIDParams, opts ...ClientOption) (*DeleteRecorderIDOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewDeleteRecorderIDParams() - } - op := &runtime.ClientOperation{ - ID: "DeleteRecorderID", - Method: "DELETE", - PathPattern: "/recorder/{id}", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &DeleteRecorderIDReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*DeleteRecorderIDOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for DeleteRecorderID: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) - panic(msg) -} - -/* -GetRecorder retrieves list of all recorders -*/ -func (a *Client) GetRecorder(params *GetRecorderParams, opts ...ClientOption) (*GetRecorderOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewGetRecorderParams() - } - op := &runtime.ClientOperation{ - ID: "GetRecorder", - Method: "GET", - PathPattern: "/recorder", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &GetRecorderReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*GetRecorderOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for GetRecorder: API contract not enforced by server. Client expected to get an error, but got: %T", result) - panic(msg) -} - -/* -GetRecorderID retrieves configuration of a recorder -*/ -func (a *Client) GetRecorderID(params *GetRecorderIDParams, opts ...ClientOption) (*GetRecorderIDOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewGetRecorderIDParams() - } - op := &runtime.ClientOperation{ - ID: "GetRecorderID", - Method: "GET", - PathPattern: "/recorder/{id}", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &GetRecorderIDReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*GetRecorderIDOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for GetRecorderID: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) - panic(msg) -} - -/* -GetRecorderMasks retrieves list of all recorder masks -*/ -func (a *Client) GetRecorderMasks(params *GetRecorderMasksParams, opts ...ClientOption) (*GetRecorderMasksOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewGetRecorderMasksParams() - } - op := &runtime.ClientOperation{ - ID: "GetRecorderMasks", - Method: "GET", - PathPattern: "/recorder/masks", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &GetRecorderMasksReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*GetRecorderMasksOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for GetRecorderMasks: API contract not enforced by server. Client expected to get an error, but got: %T", result) - panic(msg) -} - -/* -PutRecorderID creates or update recorder -*/ -func (a *Client) PutRecorderID(params *PutRecorderIDParams, opts ...ClientOption) (*PutRecorderIDOK, *PutRecorderIDCreated, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewPutRecorderIDParams() - } - op := &runtime.ClientOperation{ - ID: "PutRecorderID", - Method: "PUT", - PathPattern: "/recorder/{id}", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &PutRecorderIDReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, nil, err - } - switch value := result.(type) { - case *PutRecorderIDOK: - return value, nil, nil - case *PutRecorderIDCreated: - return nil, value, nil - } - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for recorder: API contract not enforced by server. Client expected to get an error, but got: %T", result) - panic(msg) -} - -// SetTransport changes the transport on the client -func (a *Client) SetTransport(transport runtime.ClientTransport) { - a.transport = transport -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/service/delete_service_id_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/service/delete_service_id_parameters.go deleted file mode 100644 index a464423c8..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/service/delete_service_id_parameters.go +++ /dev/null @@ -1,155 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package service - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" -) - -// NewDeleteServiceIDParams creates a new DeleteServiceIDParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewDeleteServiceIDParams() *DeleteServiceIDParams { - return &DeleteServiceIDParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewDeleteServiceIDParamsWithTimeout creates a new DeleteServiceIDParams object -// with the ability to set a timeout on a request. -func NewDeleteServiceIDParamsWithTimeout(timeout time.Duration) *DeleteServiceIDParams { - return &DeleteServiceIDParams{ - timeout: timeout, - } -} - -// NewDeleteServiceIDParamsWithContext creates a new DeleteServiceIDParams object -// with the ability to set a context for a request. -func NewDeleteServiceIDParamsWithContext(ctx context.Context) *DeleteServiceIDParams { - return &DeleteServiceIDParams{ - Context: ctx, - } -} - -// NewDeleteServiceIDParamsWithHTTPClient creates a new DeleteServiceIDParams object -// with the ability to set a custom HTTPClient for a request. -func NewDeleteServiceIDParamsWithHTTPClient(client *http.Client) *DeleteServiceIDParams { - return &DeleteServiceIDParams{ - HTTPClient: client, - } -} - -/* -DeleteServiceIDParams contains all the parameters to send to the API endpoint - - for the delete service ID operation. - - Typically these are written to a http.Request. -*/ -type DeleteServiceIDParams struct { - - /* ID. - - ID of service - */ - ID int64 - - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the delete service ID params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *DeleteServiceIDParams) WithDefaults() *DeleteServiceIDParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the delete service ID params (not the query body). -// -// All values with no default are reset to their zero value. 
-func (o *DeleteServiceIDParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the delete service ID params -func (o *DeleteServiceIDParams) WithTimeout(timeout time.Duration) *DeleteServiceIDParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the delete service ID params -func (o *DeleteServiceIDParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the delete service ID params -func (o *DeleteServiceIDParams) WithContext(ctx context.Context) *DeleteServiceIDParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the delete service ID params -func (o *DeleteServiceIDParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the delete service ID params -func (o *DeleteServiceIDParams) WithHTTPClient(client *http.Client) *DeleteServiceIDParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the delete service ID params -func (o *DeleteServiceIDParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WithID adds the id to the delete service ID params -func (o *DeleteServiceIDParams) WithID(id int64) *DeleteServiceIDParams { - o.SetID(id) - return o -} - -// SetID adds the id to the delete service ID params -func (o *DeleteServiceIDParams) SetID(id int64) { - o.ID = id -} - -// WriteToRequest writes these params to a swagger request -func (o *DeleteServiceIDParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - - // path param id - if err := r.SetPathParam("id", swag.FormatInt64(o.ID)); err != nil { - return err - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/service/delete_service_id_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/service/delete_service_id_responses.go deleted file mode 100644 index 272cdcbc2..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/service/delete_service_id_responses.go +++ /dev/null @@ -1,213 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package service - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// DeleteServiceIDReader is a Reader for the DeleteServiceID structure. -type DeleteServiceIDReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. 
-func (o *DeleteServiceIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewDeleteServiceIDOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 404: - result := NewDeleteServiceIDNotFound() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 500: - result := NewDeleteServiceIDFailure() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewDeleteServiceIDOK creates a DeleteServiceIDOK with default headers values -func NewDeleteServiceIDOK() *DeleteServiceIDOK { - return &DeleteServiceIDOK{} -} - -/* -DeleteServiceIDOK describes a response with status code 200, with default header values. - -Success -*/ -type DeleteServiceIDOK struct { -} - -// IsSuccess returns true when this delete service Id o k response has a 2xx status code -func (o *DeleteServiceIDOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this delete service Id o k response has a 3xx status code -func (o *DeleteServiceIDOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this delete service Id o k response has a 4xx status code -func (o *DeleteServiceIDOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this delete service Id o k response has a 5xx status code -func (o *DeleteServiceIDOK) IsServerError() bool { - return false -} - -// IsCode returns true when this delete service Id o k response a status code equal to that given -func (o *DeleteServiceIDOK) IsCode(code int) bool { - return code == 200 -} - -func (o *DeleteServiceIDOK) Error() string { - return fmt.Sprintf("[DELETE /service/{id}][%d] deleteServiceIdOK ", 200) -} - -func (o *DeleteServiceIDOK) String() string { - return fmt.Sprintf("[DELETE /service/{id}][%d] deleteServiceIdOK ", 200) -} - -func (o *DeleteServiceIDOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} - -// NewDeleteServiceIDNotFound creates a DeleteServiceIDNotFound with default headers values -func NewDeleteServiceIDNotFound() *DeleteServiceIDNotFound { - return &DeleteServiceIDNotFound{} -} - -/* -DeleteServiceIDNotFound describes a response with status code 404, with default header values. 
- -Service not found -*/ -type DeleteServiceIDNotFound struct { -} - -// IsSuccess returns true when this delete service Id not found response has a 2xx status code -func (o *DeleteServiceIDNotFound) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this delete service Id not found response has a 3xx status code -func (o *DeleteServiceIDNotFound) IsRedirect() bool { - return false -} - -// IsClientError returns true when this delete service Id not found response has a 4xx status code -func (o *DeleteServiceIDNotFound) IsClientError() bool { - return true -} - -// IsServerError returns true when this delete service Id not found response has a 5xx status code -func (o *DeleteServiceIDNotFound) IsServerError() bool { - return false -} - -// IsCode returns true when this delete service Id not found response a status code equal to that given -func (o *DeleteServiceIDNotFound) IsCode(code int) bool { - return code == 404 -} - -func (o *DeleteServiceIDNotFound) Error() string { - return fmt.Sprintf("[DELETE /service/{id}][%d] deleteServiceIdNotFound ", 404) -} - -func (o *DeleteServiceIDNotFound) String() string { - return fmt.Sprintf("[DELETE /service/{id}][%d] deleteServiceIdNotFound ", 404) -} - -func (o *DeleteServiceIDNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} - -// NewDeleteServiceIDFailure creates a DeleteServiceIDFailure with default headers values -func NewDeleteServiceIDFailure() *DeleteServiceIDFailure { - return &DeleteServiceIDFailure{} -} - -/* -DeleteServiceIDFailure describes a response with status code 500, with default header values. - -Service deletion failed -*/ -type DeleteServiceIDFailure struct { - Payload models.Error -} - -// IsSuccess returns true when this delete service Id failure response has a 2xx status code -func (o *DeleteServiceIDFailure) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this delete service Id failure response has a 3xx status code -func (o *DeleteServiceIDFailure) IsRedirect() bool { - return false -} - -// IsClientError returns true when this delete service Id failure response has a 4xx status code -func (o *DeleteServiceIDFailure) IsClientError() bool { - return false -} - -// IsServerError returns true when this delete service Id failure response has a 5xx status code -func (o *DeleteServiceIDFailure) IsServerError() bool { - return true -} - -// IsCode returns true when this delete service Id failure response a status code equal to that given -func (o *DeleteServiceIDFailure) IsCode(code int) bool { - return code == 500 -} - -func (o *DeleteServiceIDFailure) Error() string { - return fmt.Sprintf("[DELETE /service/{id}][%d] deleteServiceIdFailure %+v", 500, o.Payload) -} - -func (o *DeleteServiceIDFailure) String() string { - return fmt.Sprintf("[DELETE /service/{id}][%d] deleteServiceIdFailure %+v", 500, o.Payload) -} - -func (o *DeleteServiceIDFailure) GetPayload() models.Error { - return o.Payload -} - -func (o *DeleteServiceIDFailure) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/service/get_lrp_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/service/get_lrp_parameters.go deleted file mode 100644 index beb277ecd..000000000 --- 
a/vendor/github.com/cilium/cilium/api/v1/client/service/get_lrp_parameters.go +++ /dev/null @@ -1,131 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package service - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" -) - -// NewGetLrpParams creates a new GetLrpParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewGetLrpParams() *GetLrpParams { - return &GetLrpParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewGetLrpParamsWithTimeout creates a new GetLrpParams object -// with the ability to set a timeout on a request. -func NewGetLrpParamsWithTimeout(timeout time.Duration) *GetLrpParams { - return &GetLrpParams{ - timeout: timeout, - } -} - -// NewGetLrpParamsWithContext creates a new GetLrpParams object -// with the ability to set a context for a request. -func NewGetLrpParamsWithContext(ctx context.Context) *GetLrpParams { - return &GetLrpParams{ - Context: ctx, - } -} - -// NewGetLrpParamsWithHTTPClient creates a new GetLrpParams object -// with the ability to set a custom HTTPClient for a request. -func NewGetLrpParamsWithHTTPClient(client *http.Client) *GetLrpParams { - return &GetLrpParams{ - HTTPClient: client, - } -} - -/* -GetLrpParams contains all the parameters to send to the API endpoint - - for the get lrp operation. - - Typically these are written to a http.Request. -*/ -type GetLrpParams struct { - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the get lrp params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetLrpParams) WithDefaults() *GetLrpParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the get lrp params (not the query body). -// -// All values with no default are reset to their zero value. 
-func (o *GetLrpParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the get lrp params -func (o *GetLrpParams) WithTimeout(timeout time.Duration) *GetLrpParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the get lrp params -func (o *GetLrpParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the get lrp params -func (o *GetLrpParams) WithContext(ctx context.Context) *GetLrpParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the get lrp params -func (o *GetLrpParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the get lrp params -func (o *GetLrpParams) WithHTTPClient(client *http.Client) *GetLrpParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the get lrp params -func (o *GetLrpParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WriteToRequest writes these params to a swagger request -func (o *GetLrpParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/service/get_lrp_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/service/get_lrp_responses.go deleted file mode 100644 index ac5974ce5..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/service/get_lrp_responses.go +++ /dev/null @@ -1,99 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package service - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// GetLrpReader is a Reader for the GetLrp structure. -type GetLrpReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. -func (o *GetLrpReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewGetLrpOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewGetLrpOK creates a GetLrpOK with default headers values -func NewGetLrpOK() *GetLrpOK { - return &GetLrpOK{} -} - -/* -GetLrpOK describes a response with status code 200, with default header values. 
- -Success -*/ -type GetLrpOK struct { - Payload []*models.LRPSpec -} - -// IsSuccess returns true when this get lrp o k response has a 2xx status code -func (o *GetLrpOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this get lrp o k response has a 3xx status code -func (o *GetLrpOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get lrp o k response has a 4xx status code -func (o *GetLrpOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this get lrp o k response has a 5xx status code -func (o *GetLrpOK) IsServerError() bool { - return false -} - -// IsCode returns true when this get lrp o k response a status code equal to that given -func (o *GetLrpOK) IsCode(code int) bool { - return code == 200 -} - -func (o *GetLrpOK) Error() string { - return fmt.Sprintf("[GET /lrp][%d] getLrpOK %+v", 200, o.Payload) -} - -func (o *GetLrpOK) String() string { - return fmt.Sprintf("[GET /lrp][%d] getLrpOK %+v", 200, o.Payload) -} - -func (o *GetLrpOK) GetPayload() []*models.LRPSpec { - return o.Payload -} - -func (o *GetLrpOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/service/get_service_id_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/service/get_service_id_parameters.go deleted file mode 100644 index dac152aae..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/service/get_service_id_parameters.go +++ /dev/null @@ -1,155 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package service - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" -) - -// NewGetServiceIDParams creates a new GetServiceIDParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewGetServiceIDParams() *GetServiceIDParams { - return &GetServiceIDParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewGetServiceIDParamsWithTimeout creates a new GetServiceIDParams object -// with the ability to set a timeout on a request. -func NewGetServiceIDParamsWithTimeout(timeout time.Duration) *GetServiceIDParams { - return &GetServiceIDParams{ - timeout: timeout, - } -} - -// NewGetServiceIDParamsWithContext creates a new GetServiceIDParams object -// with the ability to set a context for a request. -func NewGetServiceIDParamsWithContext(ctx context.Context) *GetServiceIDParams { - return &GetServiceIDParams{ - Context: ctx, - } -} - -// NewGetServiceIDParamsWithHTTPClient creates a new GetServiceIDParams object -// with the ability to set a custom HTTPClient for a request. 
-func NewGetServiceIDParamsWithHTTPClient(client *http.Client) *GetServiceIDParams { - return &GetServiceIDParams{ - HTTPClient: client, - } -} - -/* -GetServiceIDParams contains all the parameters to send to the API endpoint - - for the get service ID operation. - - Typically these are written to a http.Request. -*/ -type GetServiceIDParams struct { - - /* ID. - - ID of service - */ - ID int64 - - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the get service ID params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetServiceIDParams) WithDefaults() *GetServiceIDParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the get service ID params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetServiceIDParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the get service ID params -func (o *GetServiceIDParams) WithTimeout(timeout time.Duration) *GetServiceIDParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the get service ID params -func (o *GetServiceIDParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the get service ID params -func (o *GetServiceIDParams) WithContext(ctx context.Context) *GetServiceIDParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the get service ID params -func (o *GetServiceIDParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the get service ID params -func (o *GetServiceIDParams) WithHTTPClient(client *http.Client) *GetServiceIDParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the get service ID params -func (o *GetServiceIDParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WithID adds the id to the get service ID params -func (o *GetServiceIDParams) WithID(id int64) *GetServiceIDParams { - o.SetID(id) - return o -} - -// SetID adds the id to the get service ID params -func (o *GetServiceIDParams) SetID(id int64) { - o.ID = id -} - -// WriteToRequest writes these params to a swagger request -func (o *GetServiceIDParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - - // path param id - if err := r.SetPathParam("id", swag.FormatInt64(o.ID)); err != nil { - return err - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/service/get_service_id_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/service/get_service_id_responses.go deleted file mode 100644 index 4b198ced9..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/service/get_service_id_responses.go +++ /dev/null @@ -1,158 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package service - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// GetServiceIDReader is a Reader for the GetServiceID structure. -type GetServiceIDReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. -func (o *GetServiceIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewGetServiceIDOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 404: - result := NewGetServiceIDNotFound() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewGetServiceIDOK creates a GetServiceIDOK with default headers values -func NewGetServiceIDOK() *GetServiceIDOK { - return &GetServiceIDOK{} -} - -/* -GetServiceIDOK describes a response with status code 200, with default header values. - -Success -*/ -type GetServiceIDOK struct { - Payload *models.Service -} - -// IsSuccess returns true when this get service Id o k response has a 2xx status code -func (o *GetServiceIDOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this get service Id o k response has a 3xx status code -func (o *GetServiceIDOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get service Id o k response has a 4xx status code -func (o *GetServiceIDOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this get service Id o k response has a 5xx status code -func (o *GetServiceIDOK) IsServerError() bool { - return false -} - -// IsCode returns true when this get service Id o k response a status code equal to that given -func (o *GetServiceIDOK) IsCode(code int) bool { - return code == 200 -} - -func (o *GetServiceIDOK) Error() string { - return fmt.Sprintf("[GET /service/{id}][%d] getServiceIdOK %+v", 200, o.Payload) -} - -func (o *GetServiceIDOK) String() string { - return fmt.Sprintf("[GET /service/{id}][%d] getServiceIdOK %+v", 200, o.Payload) -} - -func (o *GetServiceIDOK) GetPayload() *models.Service { - return o.Payload -} - -func (o *GetServiceIDOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - o.Payload = new(models.Service) - - // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewGetServiceIDNotFound creates a GetServiceIDNotFound with default headers values -func NewGetServiceIDNotFound() *GetServiceIDNotFound { - return &GetServiceIDNotFound{} -} - -/* -GetServiceIDNotFound describes a response with status code 404, with default header values. 
- -Service not found -*/ -type GetServiceIDNotFound struct { -} - -// IsSuccess returns true when this get service Id not found response has a 2xx status code -func (o *GetServiceIDNotFound) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this get service Id not found response has a 3xx status code -func (o *GetServiceIDNotFound) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get service Id not found response has a 4xx status code -func (o *GetServiceIDNotFound) IsClientError() bool { - return true -} - -// IsServerError returns true when this get service Id not found response has a 5xx status code -func (o *GetServiceIDNotFound) IsServerError() bool { - return false -} - -// IsCode returns true when this get service Id not found response a status code equal to that given -func (o *GetServiceIDNotFound) IsCode(code int) bool { - return code == 404 -} - -func (o *GetServiceIDNotFound) Error() string { - return fmt.Sprintf("[GET /service/{id}][%d] getServiceIdNotFound ", 404) -} - -func (o *GetServiceIDNotFound) String() string { - return fmt.Sprintf("[GET /service/{id}][%d] getServiceIdNotFound ", 404) -} - -func (o *GetServiceIDNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/service/get_service_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/service/get_service_parameters.go deleted file mode 100644 index ff7dfcb84..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/service/get_service_parameters.go +++ /dev/null @@ -1,131 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package service - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" -) - -// NewGetServiceParams creates a new GetServiceParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewGetServiceParams() *GetServiceParams { - return &GetServiceParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewGetServiceParamsWithTimeout creates a new GetServiceParams object -// with the ability to set a timeout on a request. -func NewGetServiceParamsWithTimeout(timeout time.Duration) *GetServiceParams { - return &GetServiceParams{ - timeout: timeout, - } -} - -// NewGetServiceParamsWithContext creates a new GetServiceParams object -// with the ability to set a context for a request. -func NewGetServiceParamsWithContext(ctx context.Context) *GetServiceParams { - return &GetServiceParams{ - Context: ctx, - } -} - -// NewGetServiceParamsWithHTTPClient creates a new GetServiceParams object -// with the ability to set a custom HTTPClient for a request. -func NewGetServiceParamsWithHTTPClient(client *http.Client) *GetServiceParams { - return &GetServiceParams{ - HTTPClient: client, - } -} - -/* -GetServiceParams contains all the parameters to send to the API endpoint - - for the get service operation. - - Typically these are written to a http.Request. 
-*/ -type GetServiceParams struct { - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the get service params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetServiceParams) WithDefaults() *GetServiceParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the get service params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetServiceParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the get service params -func (o *GetServiceParams) WithTimeout(timeout time.Duration) *GetServiceParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the get service params -func (o *GetServiceParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the get service params -func (o *GetServiceParams) WithContext(ctx context.Context) *GetServiceParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the get service params -func (o *GetServiceParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the get service params -func (o *GetServiceParams) WithHTTPClient(client *http.Client) *GetServiceParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the get service params -func (o *GetServiceParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WriteToRequest writes these params to a swagger request -func (o *GetServiceParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/service/get_service_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/service/get_service_responses.go deleted file mode 100644 index dbf75ba84..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/service/get_service_responses.go +++ /dev/null @@ -1,99 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package service - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// GetServiceReader is a Reader for the GetService structure. -type GetServiceReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. 
-func (o *GetServiceReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewGetServiceOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewGetServiceOK creates a GetServiceOK with default headers values -func NewGetServiceOK() *GetServiceOK { - return &GetServiceOK{} -} - -/* -GetServiceOK describes a response with status code 200, with default header values. - -Success -*/ -type GetServiceOK struct { - Payload []*models.Service -} - -// IsSuccess returns true when this get service o k response has a 2xx status code -func (o *GetServiceOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this get service o k response has a 3xx status code -func (o *GetServiceOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get service o k response has a 4xx status code -func (o *GetServiceOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this get service o k response has a 5xx status code -func (o *GetServiceOK) IsServerError() bool { - return false -} - -// IsCode returns true when this get service o k response a status code equal to that given -func (o *GetServiceOK) IsCode(code int) bool { - return code == 200 -} - -func (o *GetServiceOK) Error() string { - return fmt.Sprintf("[GET /service][%d] getServiceOK %+v", 200, o.Payload) -} - -func (o *GetServiceOK) String() string { - return fmt.Sprintf("[GET /service][%d] getServiceOK %+v", 200, o.Payload) -} - -func (o *GetServiceOK) GetPayload() []*models.Service { - return o.Payload -} - -func (o *GetServiceOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/service/put_service_id_parameters.go b/vendor/github.com/cilium/cilium/api/v1/client/service/put_service_id_parameters.go deleted file mode 100644 index b1877e4b7..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/service/put_service_id_parameters.go +++ /dev/null @@ -1,179 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package service - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - - "github.com/cilium/cilium/api/v1/models" -) - -// NewPutServiceIDParams creates a new PutServiceIDParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. 
-func NewPutServiceIDParams() *PutServiceIDParams { - return &PutServiceIDParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewPutServiceIDParamsWithTimeout creates a new PutServiceIDParams object -// with the ability to set a timeout on a request. -func NewPutServiceIDParamsWithTimeout(timeout time.Duration) *PutServiceIDParams { - return &PutServiceIDParams{ - timeout: timeout, - } -} - -// NewPutServiceIDParamsWithContext creates a new PutServiceIDParams object -// with the ability to set a context for a request. -func NewPutServiceIDParamsWithContext(ctx context.Context) *PutServiceIDParams { - return &PutServiceIDParams{ - Context: ctx, - } -} - -// NewPutServiceIDParamsWithHTTPClient creates a new PutServiceIDParams object -// with the ability to set a custom HTTPClient for a request. -func NewPutServiceIDParamsWithHTTPClient(client *http.Client) *PutServiceIDParams { - return &PutServiceIDParams{ - HTTPClient: client, - } -} - -/* -PutServiceIDParams contains all the parameters to send to the API endpoint - - for the put service ID operation. - - Typically these are written to a http.Request. -*/ -type PutServiceIDParams struct { - - /* Config. - - Service configuration - */ - Config *models.ServiceSpec - - /* ID. - - ID of service - */ - ID int64 - - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the put service ID params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *PutServiceIDParams) WithDefaults() *PutServiceIDParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the put service ID params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *PutServiceIDParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the put service ID params -func (o *PutServiceIDParams) WithTimeout(timeout time.Duration) *PutServiceIDParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the put service ID params -func (o *PutServiceIDParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the put service ID params -func (o *PutServiceIDParams) WithContext(ctx context.Context) *PutServiceIDParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the put service ID params -func (o *PutServiceIDParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the put service ID params -func (o *PutServiceIDParams) WithHTTPClient(client *http.Client) *PutServiceIDParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the put service ID params -func (o *PutServiceIDParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WithConfig adds the config to the put service ID params -func (o *PutServiceIDParams) WithConfig(config *models.ServiceSpec) *PutServiceIDParams { - o.SetConfig(config) - return o -} - -// SetConfig adds the config to the put service ID params -func (o *PutServiceIDParams) SetConfig(config *models.ServiceSpec) { - o.Config = config -} - -// WithID adds the id to the put service ID params -func (o *PutServiceIDParams) WithID(id int64) *PutServiceIDParams { - o.SetID(id) - return o -} - -// SetID adds the id to the put service ID params -func (o *PutServiceIDParams) SetID(id int64) { - o.ID = id -} - -// WriteToRequest writes these 
params to a swagger request -func (o *PutServiceIDParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - if o.Config != nil { - if err := r.SetBodyParam(o.Config); err != nil { - return err - } - } - - // path param id - if err := r.SetPathParam("id", swag.FormatInt64(o.ID)); err != nil { - return err - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/service/put_service_id_responses.go b/vendor/github.com/cilium/cilium/api/v1/client/service/put_service_id_responses.go deleted file mode 100644 index 39859cd6e..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/service/put_service_id_responses.go +++ /dev/null @@ -1,414 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package service - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/models" -) - -// PutServiceIDReader is a Reader for the PutServiceID structure. -type PutServiceIDReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. -func (o *PutServiceIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewPutServiceIDOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 201: - result := NewPutServiceIDCreated() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 460: - result := NewPutServiceIDInvalidFrontend() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 461: - result := NewPutServiceIDInvalidBackend() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 500: - result := NewPutServiceIDFailure() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - case 501: - result := NewPutServiceIDUpdateBackendFailure() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewPutServiceIDOK creates a PutServiceIDOK with default headers values -func NewPutServiceIDOK() *PutServiceIDOK { - return &PutServiceIDOK{} -} - -/* -PutServiceIDOK describes a response with status code 200, with default header values. 
- -Updated -*/ -type PutServiceIDOK struct { -} - -// IsSuccess returns true when this put service Id o k response has a 2xx status code -func (o *PutServiceIDOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this put service Id o k response has a 3xx status code -func (o *PutServiceIDOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this put service Id o k response has a 4xx status code -func (o *PutServiceIDOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this put service Id o k response has a 5xx status code -func (o *PutServiceIDOK) IsServerError() bool { - return false -} - -// IsCode returns true when this put service Id o k response a status code equal to that given -func (o *PutServiceIDOK) IsCode(code int) bool { - return code == 200 -} - -func (o *PutServiceIDOK) Error() string { - return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdOK ", 200) -} - -func (o *PutServiceIDOK) String() string { - return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdOK ", 200) -} - -func (o *PutServiceIDOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} - -// NewPutServiceIDCreated creates a PutServiceIDCreated with default headers values -func NewPutServiceIDCreated() *PutServiceIDCreated { - return &PutServiceIDCreated{} -} - -/* -PutServiceIDCreated describes a response with status code 201, with default header values. - -Created -*/ -type PutServiceIDCreated struct { -} - -// IsSuccess returns true when this put service Id created response has a 2xx status code -func (o *PutServiceIDCreated) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this put service Id created response has a 3xx status code -func (o *PutServiceIDCreated) IsRedirect() bool { - return false -} - -// IsClientError returns true when this put service Id created response has a 4xx status code -func (o *PutServiceIDCreated) IsClientError() bool { - return false -} - -// IsServerError returns true when this put service Id created response has a 5xx status code -func (o *PutServiceIDCreated) IsServerError() bool { - return false -} - -// IsCode returns true when this put service Id created response a status code equal to that given -func (o *PutServiceIDCreated) IsCode(code int) bool { - return code == 201 -} - -func (o *PutServiceIDCreated) Error() string { - return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdCreated ", 201) -} - -func (o *PutServiceIDCreated) String() string { - return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdCreated ", 201) -} - -func (o *PutServiceIDCreated) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - return nil -} - -// NewPutServiceIDInvalidFrontend creates a PutServiceIDInvalidFrontend with default headers values -func NewPutServiceIDInvalidFrontend() *PutServiceIDInvalidFrontend { - return &PutServiceIDInvalidFrontend{} -} - -/* -PutServiceIDInvalidFrontend describes a response with status code 460, with default header values. 
- -Invalid frontend in service configuration -*/ -type PutServiceIDInvalidFrontend struct { - Payload models.Error -} - -// IsSuccess returns true when this put service Id invalid frontend response has a 2xx status code -func (o *PutServiceIDInvalidFrontend) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this put service Id invalid frontend response has a 3xx status code -func (o *PutServiceIDInvalidFrontend) IsRedirect() bool { - return false -} - -// IsClientError returns true when this put service Id invalid frontend response has a 4xx status code -func (o *PutServiceIDInvalidFrontend) IsClientError() bool { - return true -} - -// IsServerError returns true when this put service Id invalid frontend response has a 5xx status code -func (o *PutServiceIDInvalidFrontend) IsServerError() bool { - return false -} - -// IsCode returns true when this put service Id invalid frontend response a status code equal to that given -func (o *PutServiceIDInvalidFrontend) IsCode(code int) bool { - return code == 460 -} - -func (o *PutServiceIDInvalidFrontend) Error() string { - return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdInvalidFrontend %+v", 460, o.Payload) -} - -func (o *PutServiceIDInvalidFrontend) String() string { - return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdInvalidFrontend %+v", 460, o.Payload) -} - -func (o *PutServiceIDInvalidFrontend) GetPayload() models.Error { - return o.Payload -} - -func (o *PutServiceIDInvalidFrontend) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewPutServiceIDInvalidBackend creates a PutServiceIDInvalidBackend with default headers values -func NewPutServiceIDInvalidBackend() *PutServiceIDInvalidBackend { - return &PutServiceIDInvalidBackend{} -} - -/* -PutServiceIDInvalidBackend describes a response with status code 461, with default header values. 
- -Invalid backend in service configuration -*/ -type PutServiceIDInvalidBackend struct { - Payload models.Error -} - -// IsSuccess returns true when this put service Id invalid backend response has a 2xx status code -func (o *PutServiceIDInvalidBackend) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this put service Id invalid backend response has a 3xx status code -func (o *PutServiceIDInvalidBackend) IsRedirect() bool { - return false -} - -// IsClientError returns true when this put service Id invalid backend response has a 4xx status code -func (o *PutServiceIDInvalidBackend) IsClientError() bool { - return true -} - -// IsServerError returns true when this put service Id invalid backend response has a 5xx status code -func (o *PutServiceIDInvalidBackend) IsServerError() bool { - return false -} - -// IsCode returns true when this put service Id invalid backend response a status code equal to that given -func (o *PutServiceIDInvalidBackend) IsCode(code int) bool { - return code == 461 -} - -func (o *PutServiceIDInvalidBackend) Error() string { - return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdInvalidBackend %+v", 461, o.Payload) -} - -func (o *PutServiceIDInvalidBackend) String() string { - return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdInvalidBackend %+v", 461, o.Payload) -} - -func (o *PutServiceIDInvalidBackend) GetPayload() models.Error { - return o.Payload -} - -func (o *PutServiceIDInvalidBackend) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewPutServiceIDFailure creates a PutServiceIDFailure with default headers values -func NewPutServiceIDFailure() *PutServiceIDFailure { - return &PutServiceIDFailure{} -} - -/* -PutServiceIDFailure describes a response with status code 500, with default header values. 
- -Error while creating service -*/ -type PutServiceIDFailure struct { - Payload models.Error -} - -// IsSuccess returns true when this put service Id failure response has a 2xx status code -func (o *PutServiceIDFailure) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this put service Id failure response has a 3xx status code -func (o *PutServiceIDFailure) IsRedirect() bool { - return false -} - -// IsClientError returns true when this put service Id failure response has a 4xx status code -func (o *PutServiceIDFailure) IsClientError() bool { - return false -} - -// IsServerError returns true when this put service Id failure response has a 5xx status code -func (o *PutServiceIDFailure) IsServerError() bool { - return true -} - -// IsCode returns true when this put service Id failure response a status code equal to that given -func (o *PutServiceIDFailure) IsCode(code int) bool { - return code == 500 -} - -func (o *PutServiceIDFailure) Error() string { - return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdFailure %+v", 500, o.Payload) -} - -func (o *PutServiceIDFailure) String() string { - return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdFailure %+v", 500, o.Payload) -} - -func (o *PutServiceIDFailure) GetPayload() models.Error { - return o.Payload -} - -func (o *PutServiceIDFailure) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewPutServiceIDUpdateBackendFailure creates a PutServiceIDUpdateBackendFailure with default headers values -func NewPutServiceIDUpdateBackendFailure() *PutServiceIDUpdateBackendFailure { - return &PutServiceIDUpdateBackendFailure{} -} - -/* -PutServiceIDUpdateBackendFailure describes a response with status code 501, with default header values. 
- -Error while updating backend states -*/ -type PutServiceIDUpdateBackendFailure struct { - Payload models.Error -} - -// IsSuccess returns true when this put service Id update backend failure response has a 2xx status code -func (o *PutServiceIDUpdateBackendFailure) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this put service Id update backend failure response has a 3xx status code -func (o *PutServiceIDUpdateBackendFailure) IsRedirect() bool { - return false -} - -// IsClientError returns true when this put service Id update backend failure response has a 4xx status code -func (o *PutServiceIDUpdateBackendFailure) IsClientError() bool { - return false -} - -// IsServerError returns true when this put service Id update backend failure response has a 5xx status code -func (o *PutServiceIDUpdateBackendFailure) IsServerError() bool { - return true -} - -// IsCode returns true when this put service Id update backend failure response a status code equal to that given -func (o *PutServiceIDUpdateBackendFailure) IsCode(code int) bool { - return code == 501 -} - -func (o *PutServiceIDUpdateBackendFailure) Error() string { - return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdUpdateBackendFailure %+v", 501, o.Payload) -} - -func (o *PutServiceIDUpdateBackendFailure) String() string { - return fmt.Sprintf("[PUT /service/{id}][%d] putServiceIdUpdateBackendFailure %+v", 501, o.Payload) -} - -func (o *PutServiceIDUpdateBackendFailure) GetPayload() models.Error { - return o.Payload -} - -func (o *PutServiceIDUpdateBackendFailure) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/client/service/service_client.go b/vendor/github.com/cilium/cilium/api/v1/client/service/service_client.go deleted file mode 100644 index 332799a98..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/client/service/service_client.go +++ /dev/null @@ -1,243 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package service - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" -) - -// New creates a new service API client. 
-func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService { - return &Client{transport: transport, formats: formats} -} - -/* -Client for service API -*/ -type Client struct { - transport runtime.ClientTransport - formats strfmt.Registry -} - -// ClientOption is the option for Client methods -type ClientOption func(*runtime.ClientOperation) - -// ClientService is the interface for Client methods -type ClientService interface { - DeleteServiceID(params *DeleteServiceIDParams, opts ...ClientOption) (*DeleteServiceIDOK, error) - - GetLrp(params *GetLrpParams, opts ...ClientOption) (*GetLrpOK, error) - - GetService(params *GetServiceParams, opts ...ClientOption) (*GetServiceOK, error) - - GetServiceID(params *GetServiceIDParams, opts ...ClientOption) (*GetServiceIDOK, error) - - PutServiceID(params *PutServiceIDParams, opts ...ClientOption) (*PutServiceIDOK, *PutServiceIDCreated, error) - - SetTransport(transport runtime.ClientTransport) -} - -/* -DeleteServiceID deletes a service -*/ -func (a *Client) DeleteServiceID(params *DeleteServiceIDParams, opts ...ClientOption) (*DeleteServiceIDOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewDeleteServiceIDParams() - } - op := &runtime.ClientOperation{ - ID: "DeleteServiceID", - Method: "DELETE", - PathPattern: "/service/{id}", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &DeleteServiceIDReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*DeleteServiceIDOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for DeleteServiceID: API contract not enforced by server. Client expected to get an error, but got: %T", result) - panic(msg) -} - -/* -GetLrp retrieves list of all local redirect policies -*/ -func (a *Client) GetLrp(params *GetLrpParams, opts ...ClientOption) (*GetLrpOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewGetLrpParams() - } - op := &runtime.ClientOperation{ - ID: "GetLrp", - Method: "GET", - PathPattern: "/lrp", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &GetLrpReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*GetLrpOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for GetLrp: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) - panic(msg) -} - -/* -GetService retrieves list of all services -*/ -func (a *Client) GetService(params *GetServiceParams, opts ...ClientOption) (*GetServiceOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewGetServiceParams() - } - op := &runtime.ClientOperation{ - ID: "GetService", - Method: "GET", - PathPattern: "/service", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &GetServiceReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*GetServiceOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for GetService: API contract not enforced by server. Client expected to get an error, but got: %T", result) - panic(msg) -} - -/* -GetServiceID retrieves configuration of a service -*/ -func (a *Client) GetServiceID(params *GetServiceIDParams, opts ...ClientOption) (*GetServiceIDOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewGetServiceIDParams() - } - op := &runtime.ClientOperation{ - ID: "GetServiceID", - Method: "GET", - PathPattern: "/service/{id}", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &GetServiceIDReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*GetServiceIDOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for GetServiceID: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) - panic(msg) -} - -/* -PutServiceID creates or update service -*/ -func (a *Client) PutServiceID(params *PutServiceIDParams, opts ...ClientOption) (*PutServiceIDOK, *PutServiceIDCreated, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewPutServiceIDParams() - } - op := &runtime.ClientOperation{ - ID: "PutServiceID", - Method: "PUT", - PathPattern: "/service/{id}", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &PutServiceIDReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, nil, err - } - switch value := result.(type) { - case *PutServiceIDOK: - return value, nil, nil - case *PutServiceIDCreated: - return nil, value, nil - } - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for service: API contract not enforced by server. Client expected to get an error, but got: %T", result) - panic(msg) -} - -// SetTransport changes the transport on the client -func (a *Client) SetTransport(transport runtime.ClientTransport) { - a.transport = transport -} diff --git a/vendor/github.com/cilium/cilium/api/v1/flow/README.md b/vendor/github.com/cilium/cilium/api/v1/flow/README.md index b99ccb394..bf1bb53ef 100644 --- a/vendor/github.com/cilium/cilium/api/v1/flow/README.md +++ b/vendor/github.com/cilium/cilium/api/v1/flow/README.md @@ -42,6 +42,7 @@ - [Workload](#flow-Workload) - [AgentEventType](#flow-AgentEventType) + - [AuthType](#flow-AuthType) - [DebugCapturePoint](#flow-DebugCapturePoint) - [DebugEventType](#flow-DebugEventType) - [DropReason](#flow-DropReason) @@ -267,6 +268,7 @@ EventTypeFilter is a filter describing a particular event type | uuid | [string](#string) | | uuid is a universally unique identifier for this flow. | | verdict | [Verdict](#flow-Verdict) | | | | drop_reason | [uint32](#uint32) | | **Deprecated.** only applicable to Verdict = DROPPED. deprecated in favor of drop_reason_desc. | +| auth_type | [AuthType](#flow-AuthType) | | auth_type is the authentication type specified for the flow in Cilium Network Policy. Only set on policy verdict events. | | ethernet | [Ethernet](#flow-Ethernet) | | l2 | | IP | [IP](#flow-IP) | | l3 | | l4 | [Layer4](#flow-Layer4) | | l4 | @@ -794,6 +796,19 @@ AgentNotification in pkg/monitor/api/types.go + + +### AuthType +These types correspond to definitions in pkg/policy/l4.go + +| Name | Number | Description | +| ---- | ------ | ----------- | +| DISABLED | 0 | | +| SPIRE | 1 | | +| TEST_ALWAYS_FAIL | 2 | | + + + ### DebugCapturePoint @@ -960,6 +975,8 @@ here. | CT_NO_MAP_FOUND | 190 | | | SNAT_NO_MAP_FOUND | 191 | | | INVALID_CLUSTER_ID | 192 | | +| UNSUPPORTED_PROTOCOL_FOR_DSR_ENCAP | 193 | | +| NO_EGRESS_GATEWAY | 194 | | diff --git a/vendor/github.com/cilium/cilium/api/v1/flow/flow.pb.go b/vendor/github.com/cilium/cilium/api/v1/flow/flow.pb.go index fadd50044..816a98515 100644 --- a/vendor/github.com/cilium/cilium/api/v1/flow/flow.pb.go +++ b/vendor/github.com/cilium/cilium/api/v1/flow/flow.pb.go @@ -3,8 +3,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.0 -// protoc v3.20.1 +// protoc-gen-go v1.30.0 +// protoc v4.22.3 // source: flow/flow.proto package flow @@ -77,6 +77,56 @@ func (FlowType) EnumDescriptor() ([]byte, []int) { return file_flow_flow_proto_rawDescGZIP(), []int{0} } +// These types correspond to definitions in pkg/policy/l4.go +type AuthType int32 + +const ( + AuthType_DISABLED AuthType = 0 + AuthType_SPIRE AuthType = 1 + AuthType_TEST_ALWAYS_FAIL AuthType = 2 +) + +// Enum value maps for AuthType. +var ( + AuthType_name = map[int32]string{ + 0: "DISABLED", + 1: "SPIRE", + 2: "TEST_ALWAYS_FAIL", + } + AuthType_value = map[string]int32{ + "DISABLED": 0, + "SPIRE": 1, + "TEST_ALWAYS_FAIL": 2, + } +) + +func (x AuthType) Enum() *AuthType { + p := new(AuthType) + *p = x + return p +} + +func (x AuthType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AuthType) Descriptor() protoreflect.EnumDescriptor { + return file_flow_flow_proto_enumTypes[1].Descriptor() +} + +func (AuthType) Type() protoreflect.EnumType { + return &file_flow_flow_proto_enumTypes[1] +} + +func (x AuthType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AuthType.Descriptor instead. +func (AuthType) EnumDescriptor() ([]byte, []int) { + return file_flow_flow_proto_rawDescGZIP(), []int{1} +} + type TraceObservationPoint int32 const ( @@ -165,11 +215,11 @@ func (x TraceObservationPoint) String() string { } func (TraceObservationPoint) Descriptor() protoreflect.EnumDescriptor { - return file_flow_flow_proto_enumTypes[1].Descriptor() + return file_flow_flow_proto_enumTypes[2].Descriptor() } func (TraceObservationPoint) Type() protoreflect.EnumType { - return &file_flow_flow_proto_enumTypes[1] + return &file_flow_flow_proto_enumTypes[2] } func (x TraceObservationPoint) Number() protoreflect.EnumNumber { @@ -178,7 +228,7 @@ func (x TraceObservationPoint) Number() protoreflect.EnumNumber { // Deprecated: Use TraceObservationPoint.Descriptor instead. func (TraceObservationPoint) EnumDescriptor() ([]byte, []int) { - return file_flow_flow_proto_rawDescGZIP(), []int{1} + return file_flow_flow_proto_rawDescGZIP(), []int{2} } // This enum corresponds to Cilium's L7 accesslog FlowType: @@ -220,11 +270,11 @@ func (x L7FlowType) String() string { } func (L7FlowType) Descriptor() protoreflect.EnumDescriptor { - return file_flow_flow_proto_enumTypes[2].Descriptor() + return file_flow_flow_proto_enumTypes[3].Descriptor() } func (L7FlowType) Type() protoreflect.EnumType { - return &file_flow_flow_proto_enumTypes[2] + return &file_flow_flow_proto_enumTypes[3] } func (x L7FlowType) Number() protoreflect.EnumNumber { @@ -233,7 +283,7 @@ func (x L7FlowType) Number() protoreflect.EnumNumber { // Deprecated: Use L7FlowType.Descriptor instead. 
func (L7FlowType) EnumDescriptor() ([]byte, []int) { - return file_flow_flow_proto_rawDescGZIP(), []int{2} + return file_flow_flow_proto_rawDescGZIP(), []int{3} } type IPVersion int32 @@ -269,11 +319,11 @@ func (x IPVersion) String() string { } func (IPVersion) Descriptor() protoreflect.EnumDescriptor { - return file_flow_flow_proto_enumTypes[3].Descriptor() + return file_flow_flow_proto_enumTypes[4].Descriptor() } func (IPVersion) Type() protoreflect.EnumType { - return &file_flow_flow_proto_enumTypes[3] + return &file_flow_flow_proto_enumTypes[4] } func (x IPVersion) Number() protoreflect.EnumNumber { @@ -282,7 +332,7 @@ func (x IPVersion) Number() protoreflect.EnumNumber { // Deprecated: Use IPVersion.Descriptor instead. func (IPVersion) EnumDescriptor() ([]byte, []int) { - return file_flow_flow_proto_rawDescGZIP(), []int{3} + return file_flow_flow_proto_rawDescGZIP(), []int{4} } type Verdict int32 @@ -347,11 +397,11 @@ func (x Verdict) String() string { } func (Verdict) Descriptor() protoreflect.EnumDescriptor { - return file_flow_flow_proto_enumTypes[4].Descriptor() + return file_flow_flow_proto_enumTypes[5].Descriptor() } func (Verdict) Type() protoreflect.EnumType { - return &file_flow_flow_proto_enumTypes[4] + return &file_flow_flow_proto_enumTypes[5] } func (x Verdict) Number() protoreflect.EnumNumber { @@ -360,7 +410,7 @@ func (x Verdict) Number() protoreflect.EnumNumber { // Deprecated: Use Verdict.Descriptor instead. func (Verdict) EnumDescriptor() ([]byte, []int) { - return file_flow_flow_proto_rawDescGZIP(), []int{4} + return file_flow_flow_proto_rawDescGZIP(), []int{5} } // These values are shared with pkg/monitor/api/drop.go and bpf/lib/common.h. @@ -434,6 +484,8 @@ const ( DropReason_CT_NO_MAP_FOUND DropReason = 190 DropReason_SNAT_NO_MAP_FOUND DropReason = 191 DropReason_INVALID_CLUSTER_ID DropReason = 192 + DropReason_UNSUPPORTED_PROTOCOL_FOR_DSR_ENCAP DropReason = 193 + DropReason_NO_EGRESS_GATEWAY DropReason = 194 ) // Enum value maps for DropReason. @@ -502,6 +554,8 @@ var ( 190: "CT_NO_MAP_FOUND", 191: "SNAT_NO_MAP_FOUND", 192: "INVALID_CLUSTER_ID", + 193: "UNSUPPORTED_PROTOCOL_FOR_DSR_ENCAP", + 194: "NO_EGRESS_GATEWAY", } DropReason_value = map[string]int32{ "DROP_REASON_UNKNOWN": 0, @@ -567,6 +621,8 @@ var ( "CT_NO_MAP_FOUND": 190, "SNAT_NO_MAP_FOUND": 191, "INVALID_CLUSTER_ID": 192, + "UNSUPPORTED_PROTOCOL_FOR_DSR_ENCAP": 193, + "NO_EGRESS_GATEWAY": 194, } ) @@ -581,11 +637,11 @@ func (x DropReason) String() string { } func (DropReason) Descriptor() protoreflect.EnumDescriptor { - return file_flow_flow_proto_enumTypes[5].Descriptor() + return file_flow_flow_proto_enumTypes[6].Descriptor() } func (DropReason) Type() protoreflect.EnumType { - return &file_flow_flow_proto_enumTypes[5] + return &file_flow_flow_proto_enumTypes[6] } func (x DropReason) Number() protoreflect.EnumNumber { @@ -594,7 +650,7 @@ func (x DropReason) Number() protoreflect.EnumNumber { // Deprecated: Use DropReason.Descriptor instead. 
func (DropReason) EnumDescriptor() ([]byte, []int) { - return file_flow_flow_proto_rawDescGZIP(), []int{5} + return file_flow_flow_proto_rawDescGZIP(), []int{6} } type TrafficDirection int32 @@ -630,11 +686,11 @@ func (x TrafficDirection) String() string { } func (TrafficDirection) Descriptor() protoreflect.EnumDescriptor { - return file_flow_flow_proto_enumTypes[6].Descriptor() + return file_flow_flow_proto_enumTypes[7].Descriptor() } func (TrafficDirection) Type() protoreflect.EnumType { - return &file_flow_flow_proto_enumTypes[6] + return &file_flow_flow_proto_enumTypes[7] } func (x TrafficDirection) Number() protoreflect.EnumNumber { @@ -643,7 +699,7 @@ func (x TrafficDirection) Number() protoreflect.EnumNumber { // Deprecated: Use TrafficDirection.Descriptor instead. func (TrafficDirection) EnumDescriptor() ([]byte, []int) { - return file_flow_flow_proto_rawDescGZIP(), []int{6} + return file_flow_flow_proto_rawDescGZIP(), []int{7} } // These values are shared with pkg/monitor/api/datapath_debug.go and bpf/lib/dbg.h. @@ -698,11 +754,11 @@ func (x DebugCapturePoint) String() string { } func (DebugCapturePoint) Descriptor() protoreflect.EnumDescriptor { - return file_flow_flow_proto_enumTypes[7].Descriptor() + return file_flow_flow_proto_enumTypes[8].Descriptor() } func (DebugCapturePoint) Type() protoreflect.EnumType { - return &file_flow_flow_proto_enumTypes[7] + return &file_flow_flow_proto_enumTypes[8] } func (x DebugCapturePoint) Number() protoreflect.EnumNumber { @@ -711,7 +767,7 @@ func (x DebugCapturePoint) Number() protoreflect.EnumNumber { // Deprecated: Use DebugCapturePoint.Descriptor instead. func (DebugCapturePoint) EnumDescriptor() ([]byte, []int) { - return file_flow_flow_proto_rawDescGZIP(), []int{7} + return file_flow_flow_proto_rawDescGZIP(), []int{8} } // EventType are constants are based on the ones from . @@ -750,11 +806,11 @@ func (x EventType) String() string { } func (EventType) Descriptor() protoreflect.EnumDescriptor { - return file_flow_flow_proto_enumTypes[8].Descriptor() + return file_flow_flow_proto_enumTypes[9].Descriptor() } func (EventType) Type() protoreflect.EnumType { - return &file_flow_flow_proto_enumTypes[8] + return &file_flow_flow_proto_enumTypes[9] } func (x EventType) Number() protoreflect.EnumNumber { @@ -763,7 +819,7 @@ func (x EventType) Number() protoreflect.EnumNumber { // Deprecated: Use EventType.Descriptor instead. func (EventType) EnumDescriptor() ([]byte, []int) { - return file_flow_flow_proto_rawDescGZIP(), []int{8} + return file_flow_flow_proto_rawDescGZIP(), []int{9} } type LostEventSource int32 @@ -810,11 +866,11 @@ func (x LostEventSource) String() string { } func (LostEventSource) Descriptor() protoreflect.EnumDescriptor { - return file_flow_flow_proto_enumTypes[9].Descriptor() + return file_flow_flow_proto_enumTypes[10].Descriptor() } func (LostEventSource) Type() protoreflect.EnumType { - return &file_flow_flow_proto_enumTypes[9] + return &file_flow_flow_proto_enumTypes[10] } func (x LostEventSource) Number() protoreflect.EnumNumber { @@ -823,7 +879,7 @@ func (x LostEventSource) Number() protoreflect.EnumNumber { // Deprecated: Use LostEventSource.Descriptor instead. func (LostEventSource) EnumDescriptor() ([]byte, []int) { - return file_flow_flow_proto_rawDescGZIP(), []int{9} + return file_flow_flow_proto_rawDescGZIP(), []int{10} } // AgentEventType is the type of agent event. 
These values are shared with type @@ -888,11 +944,11 @@ func (x AgentEventType) String() string { } func (AgentEventType) Descriptor() protoreflect.EnumDescriptor { - return file_flow_flow_proto_enumTypes[10].Descriptor() + return file_flow_flow_proto_enumTypes[11].Descriptor() } func (AgentEventType) Type() protoreflect.EnumType { - return &file_flow_flow_proto_enumTypes[10] + return &file_flow_flow_proto_enumTypes[11] } func (x AgentEventType) Number() protoreflect.EnumNumber { @@ -901,7 +957,7 @@ func (x AgentEventType) Number() protoreflect.EnumNumber { // Deprecated: Use AgentEventType.Descriptor instead. func (AgentEventType) EnumDescriptor() ([]byte, []int) { - return file_flow_flow_proto_rawDescGZIP(), []int{10} + return file_flow_flow_proto_rawDescGZIP(), []int{11} } // This mirrors enum xlate_point in bpf/lib/trace_sock.h @@ -944,11 +1000,11 @@ func (x SocketTranslationPoint) String() string { } func (SocketTranslationPoint) Descriptor() protoreflect.EnumDescriptor { - return file_flow_flow_proto_enumTypes[11].Descriptor() + return file_flow_flow_proto_enumTypes[12].Descriptor() } func (SocketTranslationPoint) Type() protoreflect.EnumType { - return &file_flow_flow_proto_enumTypes[11] + return &file_flow_flow_proto_enumTypes[12] } func (x SocketTranslationPoint) Number() protoreflect.EnumNumber { @@ -957,7 +1013,7 @@ func (x SocketTranslationPoint) Number() protoreflect.EnumNumber { // Deprecated: Use SocketTranslationPoint.Descriptor instead. func (SocketTranslationPoint) EnumDescriptor() ([]byte, []int) { - return file_flow_flow_proto_rawDescGZIP(), []int{11} + return file_flow_flow_proto_rawDescGZIP(), []int{12} } // These values are shared with pkg/monitor/api/datapath_debug.go and bpf/lib/dbg.h. @@ -1180,11 +1236,11 @@ func (x DebugEventType) String() string { } func (DebugEventType) Descriptor() protoreflect.EnumDescriptor { - return file_flow_flow_proto_enumTypes[12].Descriptor() + return file_flow_flow_proto_enumTypes[13].Descriptor() } func (DebugEventType) Type() protoreflect.EnumType { - return &file_flow_flow_proto_enumTypes[12] + return &file_flow_flow_proto_enumTypes[13] } func (x DebugEventType) Number() protoreflect.EnumNumber { @@ -1193,7 +1249,7 @@ func (x DebugEventType) Number() protoreflect.EnumNumber { // Deprecated: Use DebugEventType.Descriptor instead. func (DebugEventType) EnumDescriptor() ([]byte, []int) { - return file_flow_flow_proto_rawDescGZIP(), []int{12} + return file_flow_flow_proto_rawDescGZIP(), []int{13} } type Flow struct { @@ -1208,8 +1264,11 @@ type Flow struct { // only applicable to Verdict = DROPPED. // deprecated in favor of drop_reason_desc. // - // Deprecated: Do not use. + // Deprecated: Marked as deprecated in flow/flow.proto. DropReason uint32 `protobuf:"varint,3,opt,name=drop_reason,json=dropReason,proto3" json:"drop_reason,omitempty"` + // auth_type is the authentication type specified for the flow in Cilium Network Policy. + // Only set on policy verdict events. + AuthType AuthType `protobuf:"varint,35,opt,name=auth_type,json=authType,proto3,enum=flow.AuthType" json:"auth_type,omitempty"` // l2 Ethernet *Ethernet `protobuf:"bytes,4,opt,name=ethernet,proto3" json:"ethernet,omitempty"` // l3 @@ -1231,7 +1290,7 @@ type Flow struct { // able to distinguish between the value being false or it being absent. // Please use is_reply instead. // - // Deprecated: Do not use. + // Deprecated: Marked as deprecated in flow/flow.proto. 
Reply bool `protobuf:"varint,16,opt,name=reply,proto3" json:"reply,omitempty"` // EventType of the originating Cilium event EventType *CiliumEventType `protobuf:"bytes,19,opt,name=event_type,json=eventType,proto3" json:"event_type,omitempty"` @@ -1274,7 +1333,7 @@ type Flow struct { // duplicating logic from the old parser. This field will be removed once we // fully migrate to the new parser. // - // Deprecated: Do not use. + // Deprecated: Marked as deprecated in flow/flow.proto. Summary string `protobuf:"bytes,100000,opt,name=Summary,proto3" json:"Summary,omitempty"` } @@ -1331,7 +1390,7 @@ func (x *Flow) GetVerdict() Verdict { return Verdict_VERDICT_UNKNOWN } -// Deprecated: Do not use. +// Deprecated: Marked as deprecated in flow/flow.proto. func (x *Flow) GetDropReason() uint32 { if x != nil { return x.DropReason @@ -1339,6 +1398,13 @@ func (x *Flow) GetDropReason() uint32 { return 0 } +func (x *Flow) GetAuthType() AuthType { + if x != nil { + return x.AuthType + } + return AuthType_DISABLED +} + func (x *Flow) GetEthernet() *Ethernet { if x != nil { return x.Ethernet @@ -1409,7 +1475,7 @@ func (x *Flow) GetL7() *Layer7 { return nil } -// Deprecated: Do not use. +// Deprecated: Marked as deprecated in flow/flow.proto. func (x *Flow) GetReply() bool { if x != nil { return x.Reply @@ -1522,7 +1588,7 @@ func (x *Flow) GetCgroupId() uint64 { return 0 } -// Deprecated: Do not use. +// Deprecated: Marked as deprecated in flow/flow.proto. func (x *Flow) GetSummary() string { if x != nil { return x.Summary @@ -4109,7 +4175,7 @@ type ServiceUpsertNotification struct { FrontendAddress *ServiceUpsertNotificationAddr `protobuf:"bytes,2,opt,name=frontend_address,json=frontendAddress,proto3" json:"frontend_address,omitempty"` BackendAddresses []*ServiceUpsertNotificationAddr `protobuf:"bytes,3,rep,name=backend_addresses,json=backendAddresses,proto3" json:"backend_addresses,omitempty"` Type string `protobuf:"bytes,4,opt,name=type,proto3" json:"type,omitempty"` - // Deprecated: Do not use. + // Deprecated: Marked as deprecated in flow/flow.proto. TrafficPolicy string `protobuf:"bytes,5,opt,name=traffic_policy,json=trafficPolicy,proto3" json:"traffic_policy,omitempty"` Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` Namespace string `protobuf:"bytes,7,opt,name=namespace,proto3" json:"namespace,omitempty"` @@ -4177,7 +4243,7 @@ func (x *ServiceUpsertNotification) GetType() string { return "" } -// Deprecated: Do not use. +// Deprecated: Marked as deprecated in flow/flow.proto. 
func (x *ServiceUpsertNotification) GetTrafficPolicy() string { if x != nil { return x.TrafficPolicy @@ -4426,7 +4492,7 @@ var file_flow_flow_proto_rawDesc = []byte{ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9d, 0x0b, 0x0a, 0x04, 0x46, 0x6c, 0x6f, + 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xca, 0x0b, 0x0a, 0x04, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x2e, 0x0a, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x04, 0x74, 0x69, 0x6d, @@ -4436,788 +4502,799 @@ var file_flow_flow_proto_rawDesc = []byte{ 0x72, 0x64, 0x69, 0x63, 0x74, 0x52, 0x07, 0x76, 0x65, 0x72, 0x64, 0x69, 0x63, 0x74, 0x12, 0x23, 0x0a, 0x0b, 0x64, 0x72, 0x6f, 0x70, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0a, 0x64, 0x72, 0x6f, 0x70, 0x52, 0x65, 0x61, - 0x73, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x08, 0x65, 0x74, 0x68, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x45, 0x74, 0x68, - 0x65, 0x72, 0x6e, 0x65, 0x74, 0x52, 0x08, 0x65, 0x74, 0x68, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x12, - 0x18, 0x0a, 0x02, 0x49, 0x50, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x66, 0x6c, - 0x6f, 0x77, 0x2e, 0x49, 0x50, 0x52, 0x02, 0x49, 0x50, 0x12, 0x1c, 0x0a, 0x02, 0x6c, 0x34, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x4c, 0x61, 0x79, - 0x65, 0x72, 0x34, 0x52, 0x02, 0x6c, 0x34, 0x12, 0x26, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x45, - 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, - 0x30, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, + 0x73, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x23, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x41, 0x75, + 0x74, 0x68, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x61, 0x75, 0x74, 0x68, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x2a, 0x0a, 0x08, 0x65, 0x74, 0x68, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x45, 0x74, 0x68, 0x65, 0x72, 0x6e, + 0x65, 0x74, 0x52, 0x08, 0x65, 0x74, 0x68, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x12, 0x18, 0x0a, 0x02, + 0x49, 0x50, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, + 0x49, 0x50, 0x52, 0x02, 0x49, 0x50, 0x12, 0x1c, 0x0a, 0x02, 0x6c, 0x34, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x34, + 0x52, 0x02, 0x6c, 0x34, 0x12, 0x26, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x45, 0x6e, 0x64, 0x70, - 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x22, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x0e, 
0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x46, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x52, - 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x4e, 0x61, - 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, - 0x65, 0x73, 0x12, 0x1c, 0x0a, 0x02, 0x6c, 0x37, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, - 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x37, 0x52, 0x02, 0x6c, 0x37, - 0x12, 0x18, 0x0a, 0x05, 0x72, 0x65, 0x70, 0x6c, 0x79, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x42, - 0x02, 0x18, 0x01, 0x52, 0x05, 0x72, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x76, - 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, - 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x43, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x34, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x3e, 0x0a, 0x13, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x15, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x52, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x43, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, - 0x63, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x16, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x16, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, - 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x72, 0x61, 0x66, 0x66, - 0x69, 0x63, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x11, 0x70, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x17, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x54, 0x79, 0x70, 0x65, 0x12, 0x53, 0x0a, 0x17, 0x74, 0x72, 0x61, 0x63, 0x65, - 0x5f, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x69, - 0x6e, 0x74, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, - 0x54, 0x72, 0x61, 0x63, 0x65, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x15, 0x74, 0x72, 0x61, 0x63, 0x65, 0x4f, 0x62, 0x73, 0x65, - 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x3a, 0x0a, 0x10, - 0x64, 0x72, 0x6f, 0x70, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x73, 0x63, - 0x18, 0x19, 0x20, 0x01, 0x28, 
0x0e, 0x32, 0x10, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x72, - 0x6f, 0x70, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x52, 0x0e, 0x64, 0x72, 0x6f, 0x70, 0x52, 0x65, - 0x61, 0x73, 0x6f, 0x6e, 0x44, 0x65, 0x73, 0x63, 0x12, 0x35, 0x0a, 0x08, 0x69, 0x73, 0x5f, 0x72, - 0x65, 0x70, 0x6c, 0x79, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, - 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x69, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, - 0x47, 0x0a, 0x13, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x63, 0x61, 0x70, 0x74, 0x75, 0x72, 0x65, - 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x66, - 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x43, 0x61, 0x70, 0x74, 0x75, 0x72, 0x65, - 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x11, 0x64, 0x65, 0x62, 0x75, 0x67, 0x43, 0x61, 0x70, 0x74, - 0x75, 0x72, 0x65, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x34, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x66, 0x61, 0x63, 0x65, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x66, 0x6c, - 0x6f, 0x77, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x66, - 0x61, 0x63, 0x65, 0x52, 0x09, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x12, 0x1d, - 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x1d, 0x20, 0x01, - 0x28, 0x0d, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x37, 0x0a, - 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x1e, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x72, 0x61, 0x63, - 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x52, 0x0c, 0x74, 0x72, 0x61, 0x63, 0x65, 0x43, - 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x46, 0x0a, 0x10, 0x73, 0x6f, 0x63, 0x6b, 0x5f, 0x78, - 0x6c, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x1c, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x72, - 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0e, - 0x73, 0x6f, 0x63, 0x6b, 0x58, 0x6c, 0x61, 0x74, 0x65, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x23, - 0x0a, 0x0d, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x18, - 0x20, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x43, 0x6f, 0x6f, - 0x6b, 0x69, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x69, 0x64, - 0x18, 0x21, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x64, - 0x12, 0x1e, 0x0a, 0x07, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0xa0, 0x8d, 0x06, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x07, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, - 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x4a, 0x04, 0x08, 0x0c, 0x10, 0x0d, 0x4a, 0x04, 0x08, 0x11, - 0x10, 0x12, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0xc4, 0x01, 0x0a, 0x06, 0x4c, 0x61, 0x79, - 0x65, 0x72, 0x34, 0x12, 0x1d, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x09, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x43, 0x50, 0x48, 0x00, 0x52, 0x03, 0x54, - 0x43, 0x50, 0x12, 0x1d, 0x0a, 0x03, 0x55, 0x44, 0x50, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x09, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x55, 0x44, 0x50, 0x48, 0x00, 0x52, 0x03, 0x55, 0x44, - 0x50, 0x12, 0x26, 0x0a, 0x06, 0x49, 0x43, 0x4d, 0x50, 
0x76, 0x34, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x0c, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x49, 0x43, 0x4d, 0x50, 0x76, 0x34, 0x48, - 0x00, 0x52, 0x06, 0x49, 0x43, 0x4d, 0x50, 0x76, 0x34, 0x12, 0x26, 0x0a, 0x06, 0x49, 0x43, 0x4d, - 0x50, 0x76, 0x36, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x66, 0x6c, 0x6f, 0x77, - 0x2e, 0x49, 0x43, 0x4d, 0x50, 0x76, 0x36, 0x48, 0x00, 0x52, 0x06, 0x49, 0x43, 0x4d, 0x50, 0x76, - 0x36, 0x12, 0x20, 0x0a, 0x04, 0x53, 0x43, 0x54, 0x50, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x0a, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x43, 0x54, 0x50, 0x48, 0x00, 0x52, 0x04, 0x53, - 0x43, 0x54, 0x50, 0x42, 0x0a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x22, - 0xbd, 0x01, 0x0a, 0x06, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x37, 0x12, 0x24, 0x0a, 0x04, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, - 0x4c, 0x37, 0x46, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, - 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6e, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4e, 0x73, 0x12, - 0x1d, 0x0a, 0x03, 0x64, 0x6e, 0x73, 0x18, 0x64, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x66, - 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x4e, 0x53, 0x48, 0x00, 0x52, 0x03, 0x64, 0x6e, 0x73, 0x12, 0x20, - 0x0a, 0x04, 0x68, 0x74, 0x74, 0x70, 0x18, 0x65, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x66, - 0x6c, 0x6f, 0x77, 0x2e, 0x48, 0x54, 0x54, 0x50, 0x48, 0x00, 0x52, 0x04, 0x68, 0x74, 0x74, 0x70, - 0x12, 0x23, 0x0a, 0x05, 0x6b, 0x61, 0x66, 0x6b, 0x61, 0x18, 0x66, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x0b, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x4b, 0x61, 0x66, 0x6b, 0x61, 0x48, 0x00, 0x52, 0x05, - 0x6b, 0x61, 0x66, 0x6b, 0x61, 0x42, 0x08, 0x0a, 0x06, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x22, - 0x39, 0x0a, 0x0c, 0x54, 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, - 0x29, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x11, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x22, 0x28, 0x0a, 0x0b, 0x54, 0x72, - 0x61, 0x63, 0x65, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x72, 0x61, - 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x72, 0x61, - 0x63, 0x65, 0x49, 0x64, 0x22, 0xb5, 0x01, 0x0a, 0x08, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, - 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x02, 0x49, - 0x44, 0x12, 0x1a, 0x0a, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1c, 0x0a, - 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, - 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x61, 0x62, - 0x65, 0x6c, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x70, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2c, - 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x0e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 
0x6c, 0x6f, 0x61, - 0x64, 0x52, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x22, 0x32, 0x0a, 0x08, - 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, - 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, - 0x22, 0x77, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, - 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0d, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, - 0x6f, 0x72, 0x74, 0x12, 0x24, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x43, 0x50, 0x46, 0x6c, 0x61, - 0x67, 0x73, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x22, 0x8b, 0x01, 0x0a, 0x02, 0x49, 0x50, + 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x30, 0x0a, 0x0b, + 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x0e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x22, + 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0e, 0x2e, 0x66, + 0x6c, 0x6f, 0x77, 0x2e, 0x46, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x21, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, + 0x0d, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, + 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x64, + 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, + 0x1c, 0x0a, 0x02, 0x6c, 0x37, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x66, 0x6c, + 0x6f, 0x77, 0x2e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x37, 0x52, 0x02, 0x6c, 0x37, 0x12, 0x18, 0x0a, + 0x05, 0x72, 0x65, 0x70, 0x6c, 0x79, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, + 0x52, 0x05, 0x72, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x66, 0x6c, + 0x6f, 0x77, 0x2e, 0x43, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, + 0x70, 0x65, 0x52, 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, + 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, + 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x12, 0x3e, 0x0a, 0x13, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 
0x6e, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0d, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, + 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x12, 0x43, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, 0x64, + 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, + 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x44, 0x69, 0x72, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x44, + 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x11, 0x70, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x17, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x53, 0x0a, 0x17, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x6f, 0x62, + 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, + 0x18, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x72, 0x61, + 0x63, 0x65, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x69, + 0x6e, 0x74, 0x52, 0x15, 0x74, 0x72, 0x61, 0x63, 0x65, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x3a, 0x0a, 0x10, 0x64, 0x72, 0x6f, + 0x70, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x18, 0x19, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x52, + 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x52, 0x0e, 0x64, 0x72, 0x6f, 0x70, 0x52, 0x65, 0x61, 0x73, 0x6f, + 0x6e, 0x44, 0x65, 0x73, 0x63, 0x12, 0x35, 0x0a, 0x08, 0x69, 0x73, 0x5f, 0x72, 0x65, 0x70, 0x6c, + 0x79, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x07, 0x69, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x47, 0x0a, 0x13, + 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x63, 0x61, 0x70, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x66, 0x6c, 0x6f, 0x77, + 0x2e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x43, 0x61, 0x70, 0x74, 0x75, 0x72, 0x65, 0x50, 0x6f, 0x69, + 0x6e, 0x74, 0x52, 0x11, 0x64, 0x65, 0x62, 0x75, 0x67, 0x43, 0x61, 0x70, 0x74, 0x75, 0x72, 0x65, + 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x34, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, + 0x63, 0x65, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, + 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, + 0x52, 0x09, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, + 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x09, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x37, 0x0a, 0x0d, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x1e, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x12, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x52, 0x0c, 0x74, 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, + 0x65, 0x78, 0x74, 0x12, 0x46, 
0x0a, 0x10, 0x73, 0x6f, 0x63, 0x6b, 0x5f, 0x78, 0x6c, 0x61, 0x74, + 0x65, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, + 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x72, 0x61, 0x6e, 0x73, + 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x0e, 0x73, 0x6f, 0x63, + 0x6b, 0x58, 0x6c, 0x61, 0x74, 0x65, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x73, + 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x63, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x18, 0x20, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x0c, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, + 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x21, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x08, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x64, 0x12, 0x1e, 0x0a, + 0x07, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0xa0, 0x8d, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x02, 0x18, 0x01, 0x52, 0x07, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x4a, 0x04, 0x08, + 0x07, 0x10, 0x08, 0x4a, 0x04, 0x08, 0x0c, 0x10, 0x0d, 0x4a, 0x04, 0x08, 0x11, 0x10, 0x12, 0x4a, + 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0xc4, 0x01, 0x0a, 0x06, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x34, + 0x12, 0x1d, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, + 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x43, 0x50, 0x48, 0x00, 0x52, 0x03, 0x54, 0x43, 0x50, 0x12, + 0x1d, 0x0a, 0x03, 0x55, 0x44, 0x50, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x66, + 0x6c, 0x6f, 0x77, 0x2e, 0x55, 0x44, 0x50, 0x48, 0x00, 0x52, 0x03, 0x55, 0x44, 0x50, 0x12, 0x26, + 0x0a, 0x06, 0x49, 0x43, 0x4d, 0x50, 0x76, 0x34, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, + 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x49, 0x43, 0x4d, 0x50, 0x76, 0x34, 0x48, 0x00, 0x52, 0x06, + 0x49, 0x43, 0x4d, 0x50, 0x76, 0x34, 0x12, 0x26, 0x0a, 0x06, 0x49, 0x43, 0x4d, 0x50, 0x76, 0x36, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x49, 0x43, + 0x4d, 0x50, 0x76, 0x36, 0x48, 0x00, 0x52, 0x06, 0x49, 0x43, 0x4d, 0x50, 0x76, 0x36, 0x12, 0x20, + 0x0a, 0x04, 0x53, 0x43, 0x54, 0x50, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x66, + 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x43, 0x54, 0x50, 0x48, 0x00, 0x52, 0x04, 0x53, 0x43, 0x54, 0x50, + 0x42, 0x0a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x22, 0xbd, 0x01, 0x0a, + 0x06, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x37, 0x12, 0x24, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x4c, 0x37, 0x46, + 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, + 0x0a, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x09, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4e, 0x73, 0x12, 0x1d, 0x0a, 0x03, + 0x64, 0x6e, 0x73, 0x18, 0x64, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x66, 0x6c, 0x6f, 0x77, + 0x2e, 0x44, 0x4e, 0x53, 0x48, 0x00, 0x52, 0x03, 0x64, 0x6e, 0x73, 0x12, 0x20, 0x0a, 0x04, 0x68, + 0x74, 0x74, 0x70, 0x18, 0x65, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x66, 0x6c, 0x6f, 0x77, + 0x2e, 0x48, 0x54, 0x54, 0x50, 0x48, 0x00, 0x52, 0x04, 0x68, 0x74, 0x74, 0x70, 0x12, 0x23, 0x0a, + 0x05, 0x6b, 0x61, 0x66, 0x6b, 0x61, 0x18, 0x66, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x66, + 0x6c, 0x6f, 0x77, 0x2e, 0x4b, 0x61, 0x66, 0x6b, 0x61, 0x48, 0x00, 0x52, 0x05, 0x6b, 0x61, 0x66, + 0x6b, 0x61, 0x42, 0x08, 0x0a, 0x06, 0x72, 0x65, 0x63, 
0x6f, 0x72, 0x64, 0x22, 0x39, 0x0a, 0x0c, + 0x54, 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x29, 0x0a, 0x06, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x66, + 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x52, + 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x22, 0x28, 0x0a, 0x0b, 0x54, 0x72, 0x61, 0x63, 0x65, + 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, + 0x64, 0x22, 0xb5, 0x01, 0x0a, 0x08, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x0e, + 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x02, 0x49, 0x44, 0x12, 0x1a, + 0x0a, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, + 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x12, 0x19, 0x0a, 0x08, 0x70, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x70, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x09, 0x77, + 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, + 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x09, + 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x22, 0x32, 0x0a, 0x08, 0x57, 0x6f, 0x72, + 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x69, 0x6e, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0x77, 0x0a, + 0x03, 0x54, 0x43, 0x50, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, + 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, + 0x12, 0x24, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x43, 0x50, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x52, + 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x22, 0x8b, 0x01, 0x0a, 0x02, 0x49, 0x50, 0x12, 0x16, 0x0a, + 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x09, 0x69, 0x70, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x66, 0x6c, 0x6f, + 0x77, 0x2e, 0x49, 0x50, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x69, 0x70, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x65, 0x6e, 0x63, 
0x72, 0x79, 0x70, + 0x74, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x65, 0x6e, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x65, 0x64, 0x22, 0x44, 0x0a, 0x08, 0x45, 0x74, 0x68, 0x65, 0x72, 0x6e, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, - 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x09, 0x69, 0x70, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, - 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x49, 0x50, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x09, - 0x69, 0x70, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x65, 0x6e, 0x63, - 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x65, 0x6e, - 0x63, 0x72, 0x79, 0x70, 0x74, 0x65, 0x64, 0x22, 0x44, 0x0a, 0x08, 0x45, 0x74, 0x68, 0x65, 0x72, - 0x6e, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, - 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xaa, 0x01, - 0x0a, 0x08, 0x54, 0x43, 0x50, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x46, 0x49, - 0x4e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x46, 0x49, 0x4e, 0x12, 0x10, 0x0a, 0x03, - 0x53, 0x59, 0x4e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x53, 0x59, 0x4e, 0x12, 0x10, - 0x0a, 0x03, 0x52, 0x53, 0x54, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x52, 0x53, 0x54, - 0x12, 0x10, 0x0a, 0x03, 0x50, 0x53, 0x48, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x50, - 0x53, 0x48, 0x12, 0x10, 0x0a, 0x03, 0x41, 0x43, 0x4b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x03, 0x41, 0x43, 0x4b, 0x12, 0x10, 0x0a, 0x03, 0x55, 0x52, 0x47, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x03, 0x55, 0x52, 0x47, 0x12, 0x10, 0x0a, 0x03, 0x45, 0x43, 0x45, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x03, 0x45, 0x43, 0x45, 0x12, 0x10, 0x0a, 0x03, 0x43, 0x57, 0x52, 0x18, - 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x43, 0x57, 0x52, 0x12, 0x0e, 0x0a, 0x02, 0x4e, 0x53, - 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x02, 0x4e, 0x53, 0x22, 0x51, 0x0a, 0x03, 0x55, 0x44, - 0x50, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x6f, 0x72, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, - 0x72, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x64, 0x65, - 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x52, 0x0a, - 0x04, 0x53, 0x43, 0x54, 0x50, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, - 0x70, 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, + 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xaa, 0x01, 0x0a, 0x08, 0x54, + 0x43, 0x50, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x46, 0x49, 0x4e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x46, 0x49, 0x4e, 0x12, 0x10, 0x0a, 0x03, 0x53, 0x59, 0x4e, + 0x18, 0x02, 
0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x53, 0x59, 0x4e, 0x12, 0x10, 0x0a, 0x03, 0x52, + 0x53, 0x54, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x52, 0x53, 0x54, 0x12, 0x10, 0x0a, + 0x03, 0x50, 0x53, 0x48, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x50, 0x53, 0x48, 0x12, + 0x10, 0x0a, 0x03, 0x41, 0x43, 0x4b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x41, 0x43, + 0x4b, 0x12, 0x10, 0x0a, 0x03, 0x55, 0x52, 0x47, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, + 0x55, 0x52, 0x47, 0x12, 0x10, 0x0a, 0x03, 0x45, 0x43, 0x45, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x03, 0x45, 0x43, 0x45, 0x12, 0x10, 0x0a, 0x03, 0x43, 0x57, 0x52, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x03, 0x43, 0x57, 0x52, 0x12, 0x0e, 0x0a, 0x02, 0x4e, 0x53, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x02, 0x4e, 0x53, 0x22, 0x51, 0x0a, 0x03, 0x55, 0x44, 0x50, 0x12, 0x1f, + 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x72, 0x74, 0x12, + 0x29, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, + 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x52, 0x0a, 0x04, 0x53, 0x43, + 0x54, 0x50, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x6f, 0x72, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, + 0x6f, 0x72, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x64, + 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x30, + 0x0a, 0x06, 0x49, 0x43, 0x4d, 0x50, 0x76, 0x34, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, + 0x22, 0x30, 0x0a, 0x06, 0x49, 0x43, 0x4d, 0x50, 0x76, 0x36, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, + 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, + 0x64, 0x65, 0x22, 0x66, 0x0a, 0x0f, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x46, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x5f, 0x73, 0x75, 0x62, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0c, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x53, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x19, 0x0a, 0x08, 0x73, 0x75, 0x62, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x07, 0x73, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, 0x22, 0x40, 0x0a, 0x0f, 0x43, 0x69, + 0x6c, 0x69, 0x75, 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x75, 0x62, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x07, 0x73, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, 0x22, 0xb4, 0x09, 0x0a, + 0x0a, 0x46, 0x6c, 0x6f, 0x77, 0x46, 
0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x75, + 0x75, 0x69, 0x64, 0x18, 0x1d, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x12, + 0x1b, 0x0a, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x70, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x08, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x70, 0x12, 0x1d, 0x0a, 0x0a, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x71, 0x64, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x71, 0x64, 0x6e, 0x12, 0x21, 0x0a, 0x0c, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x0a, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, + 0x25, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x18, 0x10, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x37, 0x0a, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x1a, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x0e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x52, + 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x12, + 0x25, 0x0a, 0x0e, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, + 0x70, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x70, 0x12, 0x27, 0x0a, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x64, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0e, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x64, 0x12, + 0x29, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, + 0x71, 0x64, 0x6e, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x71, 0x64, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x64, 0x65, + 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, + 0x0b, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x11, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x41, 0x0a, 0x14, 0x64, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, + 0x18, 0x1b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x57, 0x6f, + 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x13, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x43, 0x0a, 0x11, 0x74, + 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x1e, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x72, + 0x61, 0x66, 0x66, 0x69, 0x63, 0x44, 0x69, 0x72, 0x65, 0x63, 
0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, + 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x27, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x64, 0x69, 0x63, 0x74, 0x18, 0x05, 0x20, 0x03, 0x28, + 0x0e, 0x32, 0x0d, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x56, 0x65, 0x72, 0x64, 0x69, 0x63, 0x74, + 0x52, 0x07, 0x76, 0x65, 0x72, 0x64, 0x69, 0x63, 0x74, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x46, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x52, 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x28, 0x0a, 0x10, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, + 0x6f, 0x64, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x68, 0x74, 0x74, 0x70, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x70, 0x6f, 0x72, 0x74, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72, - 0x74, 0x22, 0x30, 0x0a, 0x06, 0x49, 0x43, 0x4d, 0x50, 0x76, 0x34, 0x12, 0x12, 0x0a, 0x04, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, - 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, - 0x6f, 0x64, 0x65, 0x22, 0x30, 0x0a, 0x06, 0x49, 0x43, 0x4d, 0x50, 0x76, 0x36, 0x12, 0x12, 0x0a, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x74, 0x79, 0x70, - 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x04, 0x63, 0x6f, 0x64, 0x65, 0x22, 0x66, 0x0a, 0x0f, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x24, 0x0a, 0x0e, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, 0x75, 0x62, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x53, 0x75, 0x62, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x75, 0x62, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x73, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, 0x22, 0x40, 0x0a, - 0x0f, 0x43, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, - 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x75, 0x62, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x73, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, 0x22, - 0xb4, 0x09, 0x0a, 0x0a, 0x46, 0x6c, 0x6f, 0x77, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x12, - 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x1d, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x75, 
0x75, - 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x70, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x70, 0x12, - 0x1d, 0x0a, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x6f, 0x64, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x64, 0x12, 0x1f, - 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x71, 0x64, 0x6e, 0x18, 0x07, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x71, 0x64, 0x6e, 0x12, - 0x21, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, - 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, 0x61, 0x62, - 0x65, 0x6c, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x18, 0x10, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x37, 0x0a, 0x0f, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x1a, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, - 0x61, 0x64, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, - 0x61, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x69, 0x70, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x65, 0x73, 0x74, - 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x70, 0x12, 0x27, 0x0a, 0x0f, 0x64, 0x65, 0x73, - 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x64, 0x18, 0x04, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x0e, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, - 0x6f, 0x64, 0x12, 0x29, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x66, 0x71, 0x64, 0x6e, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x64, 0x65, - 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x71, 0x64, 0x6e, 0x12, 0x2b, 0x0a, - 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x61, 0x62, - 0x65, 0x6c, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x65, - 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x18, 0x11, 0x20, 0x03, 0x28, 0x09, 0x52, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x41, 0x0a, 0x14, 0x64, - 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x6c, - 0x6f, 0x61, 0x64, 0x18, 0x1b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, - 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x13, 0x64, 0x65, 0x73, 0x74, 0x69, - 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x43, - 0x0a, 0x11, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x1e, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x66, 0x6c, 0x6f, 0x77, - 0x2e, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x10, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 
0x12, 0x27, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x64, 0x69, 0x63, 0x74, 0x18, 0x05, - 0x20, 0x03, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x56, 0x65, 0x72, 0x64, - 0x69, 0x63, 0x74, 0x52, 0x07, 0x76, 0x65, 0x72, 0x64, 0x69, 0x63, 0x74, 0x12, 0x34, 0x0a, 0x0a, - 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x15, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, - 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x68, 0x74, - 0x74, 0x70, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x1a, 0x0a, 0x08, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x64, 0x65, 0x73, - 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x0e, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x50, 0x6f, 0x72, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x65, 0x70, 0x6c, 0x79, 0x18, 0x0f, 0x20, - 0x03, 0x28, 0x08, 0x52, 0x05, 0x72, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x6e, - 0x73, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x12, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x64, - 0x6e, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0d, - 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, - 0x12, 0x31, 0x0a, 0x14, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x13, - 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x74, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x68, - 0x6f, 0x64, 0x18, 0x15, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x68, 0x74, 0x74, 0x70, 0x4d, 0x65, - 0x74, 0x68, 0x6f, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x70, 0x61, 0x74, - 0x68, 0x18, 0x16, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x68, 0x74, 0x74, 0x70, 0x50, 0x61, 0x74, - 0x68, 0x12, 0x2b, 0x0a, 0x09, 0x74, 0x63, 0x70, 0x5f, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x17, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x43, 0x50, 0x46, - 0x6c, 0x61, 0x67, 0x73, 0x52, 0x08, 0x74, 0x63, 0x70, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x1b, - 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x18, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x0a, 0x69, - 0x70, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x19, 0x20, 0x03, 0x28, 0x0e, 0x32, - 0x0f, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x49, 0x50, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x09, 0x69, 0x70, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x74, - 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 
0x18, 0x1c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x74, - 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x22, 0xce, 0x01, 0x0a, 0x03, 0x44, 0x4e, 0x53, 0x12, 0x14, - 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, - 0x75, 0x65, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x03, 0x69, 0x70, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x63, 0x6e, 0x61, 0x6d, 0x65, 0x73, - 0x12, 0x2d, 0x0a, 0x12, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x6f, 0x62, - 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, - 0x14, 0x0a, 0x05, 0x72, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, - 0x72, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x71, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, - 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x71, 0x74, 0x79, 0x70, 0x65, 0x73, 0x12, 0x18, 0x0a, - 0x07, 0x72, 0x72, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, - 0x72, 0x72, 0x74, 0x79, 0x70, 0x65, 0x73, 0x22, 0x34, 0x0a, 0x0a, 0x48, 0x54, 0x54, 0x50, 0x48, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x8c, 0x01, - 0x0a, 0x04, 0x48, 0x54, 0x54, 0x50, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, - 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, - 0x6f, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x75, 0x72, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, - 0x12, 0x2a, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x10, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x48, 0x54, 0x54, 0x50, 0x48, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x22, 0x9d, 0x01, 0x0a, - 0x05, 0x4b, 0x61, 0x66, 0x6b, 0x61, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, - 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, - 0x72, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x07, 0x61, 0x70, 0x69, 0x5f, 0x6b, 0x65, - 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x61, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x12, - 0x25, 0x0a, 0x0e, 0x63, 0x6f, 0x72, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, - 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x63, 0x6f, 0x72, 0x72, 0x65, 0x6c, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 
0x63, 0x22, 0x3b, 0x0a, 0x07, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x91, 0x01, 0x0a, 0x09, 0x4c, 0x6f, - 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x4c, - 0x6f, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x06, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x75, 0x6d, 0x5f, 0x65, 0x76, - 0x65, 0x6e, 0x74, 0x73, 0x5f, 0x6c, 0x6f, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, - 0x0d, 0x6e, 0x75, 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x4c, 0x6f, 0x73, 0x74, 0x12, 0x2d, - 0x0a, 0x03, 0x63, 0x70, 0x75, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, - 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x03, 0x63, 0x70, 0x75, 0x22, 0xf6, 0x04, - 0x0a, 0x0a, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x04, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x66, 0x6c, 0x6f, - 0x77, 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, - 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, - 0x6e, 0x18, 0x64, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x41, - 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, - 0x48, 0x00, 0x52, 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x12, 0x39, 0x0a, 0x0b, 0x61, - 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x65, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x16, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x4e, 0x6f, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0a, 0x61, 0x67, 0x65, 0x6e, - 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x66, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, - 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, - 0x0c, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x52, 0x0a, - 0x13, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x72, 0x65, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x65, 0x18, 0x67, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x66, 0x6c, 0x6f, - 0x77, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x65, 0x6e, 0x4e, - 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x12, 0x65, - 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x65, 0x12, 0x4b, 0x0a, 0x0f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x75, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x18, 0x68, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x66, 0x6c, 0x6f, - 0x77, 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 
0x65, - 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0e, - 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x42, - 0x0a, 0x0e, 0x69, 0x70, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x18, 0x69, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x49, 0x50, - 0x43, 0x61, 0x63, 0x68, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x48, 0x00, 0x52, 0x0d, 0x69, 0x70, 0x63, 0x61, 0x63, 0x68, 0x65, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x12, 0x48, 0x0a, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x75, 0x70, - 0x73, 0x65, 0x72, 0x74, 0x18, 0x6a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x66, 0x6c, 0x6f, - 0x77, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x4e, - 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0d, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x12, 0x48, 0x0a, 0x0e, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x6b, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4b, 0x0a, 0x11, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, - 0x76, 0x65, 0x6e, 0x74, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, - 0x22, 0x0a, 0x0c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x22, 0x42, 0x0a, 0x10, 0x54, 0x69, 0x6d, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x0a, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x52, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x22, 0x6d, 0x0a, 0x18, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, - 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x72, - 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x75, 0x6c, 0x65, 0x5f, - 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x75, 0x6c, - 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x59, 0x0a, 0x19, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, - 0x6e, 0x74, 0x52, 0x65, 0x67, 0x65, 0x6e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, - 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x09, 
0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, - 0x72, 0x22, 0x93, 0x01, 0x0a, 0x1a, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x70, + 0x74, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x65, 0x70, 0x6c, 0x79, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x08, + 0x52, 0x05, 0x72, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x6e, 0x73, 0x5f, 0x71, + 0x75, 0x65, 0x72, 0x79, 0x18, 0x12, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x64, 0x6e, 0x73, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x0e, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x31, 0x0a, + 0x14, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x13, 0x64, 0x65, 0x73, + 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x12, 0x1f, 0x0a, 0x0b, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, + 0x15, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x68, 0x74, 0x74, 0x70, 0x4d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x16, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x68, 0x74, 0x74, 0x70, 0x50, 0x61, 0x74, 0x68, 0x12, 0x2b, + 0x0a, 0x09, 0x74, 0x63, 0x70, 0x5f, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x17, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x0e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x43, 0x50, 0x46, 0x6c, 0x61, 0x67, + 0x73, 0x52, 0x08, 0x74, 0x63, 0x70, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6e, + 0x6f, 0x64, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x18, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, + 0x6e, 0x6f, 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x0a, 0x69, 0x70, 0x5f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x19, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x66, + 0x6c, 0x6f, 0x77, 0x2e, 0x49, 0x50, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x69, + 0x70, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x72, 0x61, 0x63, + 0x65, 0x5f, 0x69, 0x64, 0x18, 0x1c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, + 0x65, 0x49, 0x64, 0x22, 0xce, 0x01, 0x0a, 0x03, 0x44, 0x4e, 0x53, 0x12, 0x14, 0x0a, 0x05, 0x71, + 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x03, + 0x69, 0x70, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x63, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x2d, 0x0a, + 0x12, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x6f, 0x62, 0x73, 0x65, 0x72, + 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x72, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x72, 0x63, 0x6f, + 0x64, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x71, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x06, 0x71, 0x74, 0x79, 
0x70, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x72, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x72, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x22, 0x34, 0x0a, 0x0a, 0x48, 0x54, 0x54, 0x50, 0x48, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x8c, 0x01, 0x0a, 0x04, 0x48, + 0x54, 0x54, 0x50, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, + 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, + 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x2a, 0x0a, + 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, + 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x48, 0x54, 0x54, 0x50, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x22, 0x9d, 0x01, 0x0a, 0x05, 0x4b, 0x61, + 0x66, 0x6b, 0x61, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x64, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, + 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x07, 0x61, 0x70, 0x69, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x61, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x12, 0x25, 0x0a, 0x0e, + 0x63, 0x6f, 0x72, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x63, 0x6f, 0x72, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x22, 0x3b, 0x0a, 0x07, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x91, 0x01, 0x0a, 0x09, 0x4c, 0x6f, 0x73, 0x74, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x4c, 0x6f, 0x73, 0x74, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x06, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x75, 0x6d, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x73, 0x5f, 0x6c, 0x6f, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6e, 0x75, + 0x6d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x4c, 0x6f, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x03, 0x63, + 0x70, 0x75, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 
0x49, 0x6e, 0x74, 0x33, 0x32, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x03, 0x63, 0x70, 0x75, 0x22, 0xf6, 0x04, 0x0a, 0x0a, 0x41, + 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x41, + 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x18, 0x64, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x41, 0x67, 0x65, 0x6e, + 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x48, 0x00, 0x52, + 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x12, 0x39, 0x0a, 0x0b, 0x61, 0x67, 0x65, 0x6e, + 0x74, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x65, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, + 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0a, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x53, 0x74, + 0x61, 0x72, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x18, 0x66, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x66, 0x6c, 0x6f, + 0x77, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x6f, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0c, 0x70, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x52, 0x0a, 0x13, 0x65, 0x6e, + 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x72, 0x65, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x65, 0x18, 0x67, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x45, + 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x65, 0x6e, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x12, 0x65, 0x6e, 0x64, 0x70, + 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x65, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x12, 0x4b, + 0x0a, 0x0f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x18, 0x68, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x45, + 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0e, 0x65, 0x6e, 0x64, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x42, 0x0a, 0x0e, 0x69, + 0x70, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x69, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x49, 0x50, 0x43, 0x61, 0x63, + 0x68, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, + 0x52, 0x0d, 0x69, 0x70, 0x63, 0x61, 0x63, 0x68, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, + 0x48, 0x0a, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x75, 0x70, 0x73, 0x65, 0x72, + 0x74, 0x18, 0x6a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0d, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x12, 0x48, 0x0a, 0x0e, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x6b, 0x20, 0x01, 
0x28, + 0x0b, 0x32, 0x1f, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x22, 0x4b, 0x0a, 0x11, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x22, 0x0a, 0x0c, + 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x22, 0x42, 0x0a, 0x10, 0x54, 0x69, 0x6d, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x0a, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x04, + 0x74, 0x69, 0x6d, 0x65, 0x22, 0x6d, 0x0a, 0x18, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x76, 0x69, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x72, 0x65, 0x76, 0x69, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x75, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x75, 0x6c, 0x65, 0x43, 0x6f, + 0x75, 0x6e, 0x74, 0x22, 0x59, 0x0a, 0x19, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, + 0x65, 0x67, 0x65, 0x6e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, - 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x19, - 0x0a, 0x08, 0x70, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x70, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, - 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x99, 0x02, 0x0a, 0x13, 0x49, 0x50, 0x43, 0x61, - 0x63, 0x68, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x12, 0x0a, 0x04, 0x63, 0x69, 0x64, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, - 0x69, 0x64, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, - 0x3f, 0x0a, 0x0c, 0x6f, 0x6c, 0x64, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 
0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x6f, 0x6c, 0x64, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, - 0x12, 0x17, 0x0a, 0x07, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x69, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x68, 0x6f, 0x73, 0x74, 0x49, 0x70, 0x12, 0x1e, 0x0a, 0x0b, 0x6f, 0x6c, 0x64, - 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x69, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x6f, 0x6c, 0x64, 0x48, 0x6f, 0x73, 0x74, 0x49, 0x70, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x6e, 0x63, - 0x72, 0x79, 0x70, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, - 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, - 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x70, 0x6f, 0x64, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x6f, 0x64, 0x4e, - 0x61, 0x6d, 0x65, 0x22, 0x43, 0x0a, 0x1d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, - 0x73, 0x65, 0x72, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x41, 0x64, 0x64, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x02, 0x69, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0d, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x22, 0x9a, 0x03, 0x0a, 0x19, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0d, 0x52, 0x02, 0x69, 0x64, 0x12, 0x4e, 0x0a, 0x10, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, - 0x6e, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x23, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, - 0x70, 0x73, 0x65, 0x72, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x41, 0x64, 0x64, 0x72, 0x52, 0x0f, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x41, - 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x50, 0x0a, 0x11, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, - 0x64, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x23, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x41, 0x64, 0x64, 0x72, 0x52, 0x10, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x41, - 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x0e, - 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, - 0x63, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x65, 0x78, 0x74, - 0x5f, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 
0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, - 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x65, 0x78, 0x74, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, - 0x63, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2c, 0x0a, 0x12, 0x69, 0x6e, 0x74, 0x5f, 0x74, - 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x09, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x10, 0x69, 0x6e, 0x74, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x2b, 0x0a, 0x19, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x02, - 0x69, 0x64, 0x22, 0x3c, 0x0a, 0x10, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x6e, 0x74, - 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x22, 0xef, 0x02, 0x0a, 0x0a, 0x44, 0x65, 0x62, 0x75, 0x67, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, - 0x28, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, - 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, - 0x2e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x12, 0x30, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x04, 0x68, - 0x61, 0x73, 0x68, 0x12, 0x30, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x31, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, - 0x04, 0x61, 0x72, 0x67, 0x31, 0x12, 0x30, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x32, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x52, 0x04, 0x61, 0x72, 0x67, 0x32, 0x12, 0x30, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x33, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x52, 0x04, 0x61, 0x72, 0x67, 0x33, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x12, 0x2d, 0x0a, 0x03, 0x63, 0x70, 0x75, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x03, 0x63, - 0x70, 0x75, 0x2a, 0x39, 0x0a, 0x08, 0x46, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, - 0x0a, 0x0c, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x54, 0x59, 
0x50, 0x45, 0x10, 0x00, - 0x12, 0x09, 0x0a, 0x05, 0x4c, 0x33, 0x5f, 0x4c, 0x34, 0x10, 0x01, 0x12, 0x06, 0x0a, 0x02, 0x4c, - 0x37, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x53, 0x4f, 0x43, 0x4b, 0x10, 0x03, 0x2a, 0xea, 0x01, - 0x0a, 0x15, 0x54, 0x72, 0x61, 0x63, 0x65, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x11, 0x0a, 0x0d, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, - 0x57, 0x4e, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x54, 0x4f, - 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x54, 0x4f, 0x5f, 0x48, - 0x4f, 0x53, 0x54, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x54, 0x4f, 0x5f, 0x53, 0x54, 0x41, 0x43, - 0x4b, 0x10, 0x03, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x4f, 0x5f, 0x4f, 0x56, 0x45, 0x52, 0x4c, 0x41, - 0x59, 0x10, 0x04, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x4f, 0x5f, 0x45, 0x4e, 0x44, 0x50, 0x4f, 0x49, - 0x4e, 0x54, 0x10, 0x65, 0x12, 0x11, 0x0a, 0x0d, 0x46, 0x52, 0x4f, 0x4d, 0x5f, 0x45, 0x4e, 0x44, - 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x10, 0x05, 0x12, 0x0e, 0x0a, 0x0a, 0x46, 0x52, 0x4f, 0x4d, 0x5f, - 0x50, 0x52, 0x4f, 0x58, 0x59, 0x10, 0x06, 0x12, 0x0d, 0x0a, 0x09, 0x46, 0x52, 0x4f, 0x4d, 0x5f, - 0x48, 0x4f, 0x53, 0x54, 0x10, 0x07, 0x12, 0x0e, 0x0a, 0x0a, 0x46, 0x52, 0x4f, 0x4d, 0x5f, 0x53, - 0x54, 0x41, 0x43, 0x4b, 0x10, 0x08, 0x12, 0x10, 0x0a, 0x0c, 0x46, 0x52, 0x4f, 0x4d, 0x5f, 0x4f, - 0x56, 0x45, 0x52, 0x4c, 0x41, 0x59, 0x10, 0x09, 0x12, 0x10, 0x0a, 0x0c, 0x46, 0x52, 0x4f, 0x4d, - 0x5f, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x10, 0x0a, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x4f, - 0x5f, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x10, 0x0b, 0x2a, 0x48, 0x0a, 0x0a, 0x4c, 0x37, - 0x46, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x12, 0x13, 0x0a, 0x0f, 0x55, 0x4e, 0x4b, 0x4e, - 0x4f, 0x57, 0x4e, 0x5f, 0x4c, 0x37, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x10, 0x00, 0x12, 0x0b, 0x0a, - 0x07, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x45, - 0x53, 0x50, 0x4f, 0x4e, 0x53, 0x45, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x41, 0x4d, 0x50, - 0x4c, 0x45, 0x10, 0x03, 0x2a, 0x30, 0x0a, 0x09, 0x49, 0x50, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x0f, 0x0a, 0x0b, 0x49, 0x50, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x55, 0x53, 0x45, 0x44, - 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x50, 0x76, 0x34, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, - 0x49, 0x50, 0x76, 0x36, 0x10, 0x02, 0x2a, 0x7c, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x64, 0x69, 0x63, - 0x74, 0x12, 0x13, 0x0a, 0x0f, 0x56, 0x45, 0x52, 0x44, 0x49, 0x43, 0x54, 0x5f, 0x55, 0x4e, 0x4b, - 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x46, 0x4f, 0x52, 0x57, 0x41, 0x52, - 0x44, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x52, 0x4f, 0x50, 0x50, 0x45, 0x44, - 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x03, 0x12, 0x09, 0x0a, - 0x05, 0x41, 0x55, 0x44, 0x49, 0x54, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x52, 0x45, 0x44, 0x49, - 0x52, 0x45, 0x43, 0x54, 0x45, 0x44, 0x10, 0x05, 0x12, 0x0a, 0x0a, 0x06, 0x54, 0x52, 0x41, 0x43, - 0x45, 0x44, 0x10, 0x06, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x52, 0x41, 0x4e, 0x53, 0x4c, 0x41, 0x54, - 0x45, 0x44, 0x10, 0x07, 0x2a, 0xf1, 0x0e, 0x0a, 0x0a, 0x44, 0x72, 0x6f, 0x70, 0x52, 0x65, 0x61, - 0x73, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x13, 0x44, 0x52, 0x4f, 0x50, 0x5f, 0x52, 0x45, 0x41, 0x53, - 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x17, 0x0a, 0x12, - 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x4d, - 
0x41, 0x43, 0x10, 0x82, 0x01, 0x12, 0x1c, 0x0a, 0x17, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, - 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x41, 0x43, - 0x10, 0x83, 0x01, 0x12, 0x16, 0x0a, 0x11, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x53, - 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x49, 0x50, 0x10, 0x84, 0x01, 0x12, 0x12, 0x0a, 0x0d, 0x50, - 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x85, 0x01, 0x12, - 0x1b, 0x0a, 0x16, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x50, 0x41, 0x43, 0x4b, 0x45, - 0x54, 0x5f, 0x44, 0x52, 0x4f, 0x50, 0x50, 0x45, 0x44, 0x10, 0x86, 0x01, 0x12, 0x23, 0x0a, 0x1e, - 0x43, 0x54, 0x5f, 0x54, 0x52, 0x55, 0x4e, 0x43, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x4f, 0x52, 0x5f, - 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x87, - 0x01, 0x12, 0x1c, 0x0a, 0x17, 0x43, 0x54, 0x5f, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4e, 0x47, 0x5f, - 0x54, 0x43, 0x50, 0x5f, 0x41, 0x43, 0x4b, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x10, 0x88, 0x01, 0x12, - 0x1b, 0x0a, 0x16, 0x43, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x4c, 0x34, - 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x10, 0x89, 0x01, 0x12, 0x27, 0x0a, 0x22, - 0x43, 0x54, 0x5f, 0x43, 0x41, 0x4e, 0x4e, 0x4f, 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, - 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x5f, 0x46, 0x52, 0x4f, 0x4d, 0x5f, 0x50, 0x41, 0x43, 0x4b, - 0x45, 0x54, 0x10, 0x8a, 0x01, 0x12, 0x1c, 0x0a, 0x17, 0x55, 0x4e, 0x53, 0x55, 0x50, 0x50, 0x4f, - 0x52, 0x54, 0x45, 0x44, 0x5f, 0x4c, 0x33, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, - 0x10, 0x8b, 0x01, 0x12, 0x15, 0x0a, 0x10, 0x4d, 0x49, 0x53, 0x53, 0x45, 0x44, 0x5f, 0x54, 0x41, - 0x49, 0x4c, 0x5f, 0x43, 0x41, 0x4c, 0x4c, 0x10, 0x8c, 0x01, 0x12, 0x1c, 0x0a, 0x17, 0x45, 0x52, - 0x52, 0x4f, 0x52, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x49, 0x4e, 0x47, 0x5f, 0x54, 0x4f, 0x5f, 0x50, - 0x41, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x8d, 0x01, 0x12, 0x18, 0x0a, 0x13, 0x55, 0x4e, 0x4b, 0x4e, - 0x4f, 0x57, 0x4e, 0x5f, 0x4c, 0x34, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x10, - 0x8e, 0x01, 0x12, 0x18, 0x0a, 0x13, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x49, 0x43, - 0x4d, 0x50, 0x56, 0x34, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x10, 0x8f, 0x01, 0x12, 0x18, 0x0a, 0x13, - 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x56, 0x34, 0x5f, 0x54, - 0x59, 0x50, 0x45, 0x10, 0x90, 0x01, 0x12, 0x18, 0x0a, 0x13, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, - 0x4e, 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x56, 0x36, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x10, 0x91, 0x01, - 0x12, 0x18, 0x0a, 0x13, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x49, 0x43, 0x4d, 0x50, - 0x56, 0x36, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x10, 0x92, 0x01, 0x12, 0x20, 0x0a, 0x1b, 0x45, 0x52, - 0x52, 0x4f, 0x52, 0x5f, 0x52, 0x45, 0x54, 0x52, 0x49, 0x45, 0x56, 0x49, 0x4e, 0x47, 0x5f, 0x54, - 0x55, 0x4e, 0x4e, 0x45, 0x4c, 0x5f, 0x4b, 0x45, 0x59, 0x10, 0x93, 0x01, 0x12, 0x24, 0x0a, 0x1f, - 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x52, 0x45, 0x54, 0x52, 0x49, 0x45, 0x56, 0x49, 0x4e, 0x47, - 0x5f, 0x54, 0x55, 0x4e, 0x4e, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x53, 0x10, - 0x94, 0x01, 0x12, 0x1a, 0x0a, 0x15, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x47, 0x45, - 0x4e, 0x45, 0x56, 0x45, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x95, 0x01, 0x12, 0x1e, - 0x0a, 0x19, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x4c, 0x33, 0x5f, 0x54, 0x41, 0x52, - 0x47, 0x45, 0x54, 0x5f, 
0x41, 0x44, 0x44, 0x52, 0x45, 0x53, 0x53, 0x10, 0x96, 0x01, 0x12, 0x1b, - 0x0a, 0x16, 0x53, 0x54, 0x41, 0x4c, 0x45, 0x5f, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x52, 0x4f, 0x55, - 0x54, 0x41, 0x42, 0x4c, 0x45, 0x5f, 0x49, 0x50, 0x10, 0x97, 0x01, 0x12, 0x26, 0x0a, 0x21, 0x4e, - 0x4f, 0x5f, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x49, 0x4e, 0x47, 0x5f, 0x4c, 0x4f, 0x43, 0x41, 0x4c, - 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, 0x4e, 0x45, 0x52, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, - 0x10, 0x98, 0x01, 0x12, 0x27, 0x0a, 0x22, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x57, 0x48, 0x49, - 0x4c, 0x45, 0x5f, 0x43, 0x4f, 0x52, 0x52, 0x45, 0x43, 0x54, 0x49, 0x4e, 0x47, 0x5f, 0x4c, 0x33, - 0x5f, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x53, 0x55, 0x4d, 0x10, 0x99, 0x01, 0x12, 0x27, 0x0a, 0x22, - 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x57, 0x48, 0x49, 0x4c, 0x45, 0x5f, 0x43, 0x4f, 0x52, 0x52, - 0x45, 0x43, 0x54, 0x49, 0x4e, 0x47, 0x5f, 0x4c, 0x34, 0x5f, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x53, - 0x55, 0x4d, 0x10, 0x9a, 0x01, 0x12, 0x1c, 0x0a, 0x17, 0x43, 0x54, 0x5f, 0x4d, 0x41, 0x50, 0x5f, - 0x49, 0x4e, 0x53, 0x45, 0x52, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, - 0x10, 0x9b, 0x01, 0x12, 0x22, 0x0a, 0x1d, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x49, - 0x50, 0x56, 0x36, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x48, 0x45, - 0x41, 0x44, 0x45, 0x52, 0x10, 0x9c, 0x01, 0x12, 0x23, 0x0a, 0x1e, 0x49, 0x50, 0x5f, 0x46, 0x52, - 0x41, 0x47, 0x4d, 0x45, 0x4e, 0x54, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, - 0x53, 0x55, 0x50, 0x50, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x10, 0x9d, 0x01, 0x12, 0x1e, 0x0a, 0x19, - 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, - 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x9e, 0x01, 0x12, 0x28, 0x0a, 0x23, - 0x4e, 0x4f, 0x5f, 0x54, 0x55, 0x4e, 0x4e, 0x45, 0x4c, 0x5f, 0x4f, 0x52, 0x5f, 0x45, 0x4e, 0x43, - 0x41, 0x50, 0x53, 0x55, 0x4c, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x45, 0x4e, 0x44, 0x50, 0x4f, - 0x49, 0x4e, 0x54, 0x10, 0xa0, 0x01, 0x12, 0x23, 0x0a, 0x1e, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, - 0x5f, 0x54, 0x4f, 0x5f, 0x49, 0x4e, 0x53, 0x45, 0x52, 0x54, 0x5f, 0x49, 0x4e, 0x54, 0x4f, 0x5f, - 0x50, 0x52, 0x4f, 0x58, 0x59, 0x4d, 0x41, 0x50, 0x10, 0xa1, 0x01, 0x12, 0x2b, 0x0a, 0x26, 0x52, - 0x45, 0x41, 0x43, 0x48, 0x45, 0x44, 0x5f, 0x45, 0x44, 0x54, 0x5f, 0x52, 0x41, 0x54, 0x45, 0x5f, - 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x49, 0x4e, 0x47, 0x5f, 0x44, 0x52, 0x4f, 0x50, 0x5f, 0x48, 0x4f, - 0x52, 0x49, 0x5a, 0x4f, 0x4e, 0x10, 0xa2, 0x01, 0x12, 0x26, 0x0a, 0x21, 0x55, 0x4e, 0x4b, 0x4e, - 0x4f, 0x57, 0x4e, 0x5f, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, - 0x52, 0x41, 0x43, 0x4b, 0x49, 0x4e, 0x47, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x10, 0xa3, 0x01, - 0x12, 0x1e, 0x0a, 0x19, 0x4c, 0x4f, 0x43, 0x41, 0x4c, 0x5f, 0x48, 0x4f, 0x53, 0x54, 0x5f, 0x49, - 0x53, 0x5f, 0x55, 0x4e, 0x52, 0x45, 0x41, 0x43, 0x48, 0x41, 0x42, 0x4c, 0x45, 0x10, 0xa4, 0x01, - 0x12, 0x3a, 0x0a, 0x35, 0x4e, 0x4f, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x55, 0x52, 0x41, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x41, 0x56, 0x41, 0x49, 0x4c, 0x41, 0x42, 0x4c, 0x45, 0x5f, 0x54, - 0x4f, 0x5f, 0x50, 0x45, 0x52, 0x46, 0x4f, 0x52, 0x4d, 0x5f, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, - 0x5f, 0x44, 0x45, 0x43, 0x49, 0x53, 0x49, 0x4f, 0x4e, 0x10, 0xa5, 0x01, 0x12, 0x1c, 0x0a, 0x17, - 0x55, 0x4e, 0x53, 0x55, 0x50, 0x50, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x5f, 0x4c, 0x32, 0x5f, 0x50, - 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x10, 
0xa6, 0x01, 0x12, 0x22, 0x0a, 0x1d, 0x4e, 0x4f, - 0x5f, 0x4d, 0x41, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x5f, 0x46, 0x4f, 0x52, 0x5f, 0x4e, 0x41, 0x54, - 0x5f, 0x4d, 0x41, 0x53, 0x51, 0x55, 0x45, 0x52, 0x41, 0x44, 0x45, 0x10, 0xa7, 0x01, 0x12, 0x2c, - 0x0a, 0x27, 0x55, 0x4e, 0x53, 0x55, 0x50, 0x50, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x5f, 0x50, 0x52, - 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x46, 0x4f, 0x52, 0x5f, 0x4e, 0x41, 0x54, 0x5f, 0x4d, - 0x41, 0x53, 0x51, 0x55, 0x45, 0x52, 0x41, 0x44, 0x45, 0x10, 0xa8, 0x01, 0x12, 0x16, 0x0a, 0x11, - 0x46, 0x49, 0x42, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, - 0x44, 0x10, 0xa9, 0x01, 0x12, 0x28, 0x0a, 0x23, 0x45, 0x4e, 0x43, 0x41, 0x50, 0x53, 0x55, 0x4c, - 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x52, 0x41, 0x46, 0x46, 0x49, 0x43, 0x5f, 0x49, 0x53, - 0x5f, 0x50, 0x52, 0x4f, 0x48, 0x49, 0x42, 0x49, 0x54, 0x45, 0x44, 0x10, 0xaa, 0x01, 0x12, 0x15, - 0x0a, 0x10, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x49, 0x44, 0x45, 0x4e, 0x54, 0x49, - 0x54, 0x59, 0x10, 0xab, 0x01, 0x12, 0x13, 0x0a, 0x0e, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, - 0x5f, 0x53, 0x45, 0x4e, 0x44, 0x45, 0x52, 0x10, 0xac, 0x01, 0x12, 0x13, 0x0a, 0x0e, 0x4e, 0x41, - 0x54, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x4e, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, 0xad, 0x01, 0x12, - 0x13, 0x0a, 0x0e, 0x49, 0x53, 0x5f, 0x41, 0x5f, 0x43, 0x4c, 0x55, 0x53, 0x54, 0x45, 0x52, 0x49, - 0x50, 0x10, 0xae, 0x01, 0x12, 0x2e, 0x0a, 0x29, 0x46, 0x49, 0x52, 0x53, 0x54, 0x5f, 0x4c, 0x4f, - 0x47, 0x49, 0x43, 0x41, 0x4c, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x47, 0x52, 0x41, 0x4d, 0x5f, 0x46, - 0x52, 0x41, 0x47, 0x4d, 0x45, 0x4e, 0x54, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, - 0x44, 0x10, 0xaf, 0x01, 0x12, 0x1d, 0x0a, 0x18, 0x46, 0x4f, 0x52, 0x42, 0x49, 0x44, 0x44, 0x45, - 0x4e, 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x56, 0x36, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, - 0x10, 0xb0, 0x01, 0x12, 0x21, 0x0a, 0x1c, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x5f, 0x42, 0x59, - 0x5f, 0x4c, 0x42, 0x5f, 0x53, 0x52, 0x43, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x5f, 0x43, 0x48, - 0x45, 0x43, 0x4b, 0x10, 0xb1, 0x01, 0x12, 0x19, 0x0a, 0x14, 0x53, 0x4f, 0x43, 0x4b, 0x45, 0x54, - 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0xb2, - 0x01, 0x12, 0x19, 0x0a, 0x14, 0x53, 0x4f, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x41, 0x53, 0x53, 0x49, - 0x47, 0x4e, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0xb3, 0x01, 0x12, 0x31, 0x0a, 0x2c, - 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x52, 0x45, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x49, 0x4f, - 0x4e, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x53, 0x55, 0x50, 0x50, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x5f, - 0x46, 0x4f, 0x52, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x10, 0xb4, 0x01, 0x12, - 0x10, 0x0a, 0x0b, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x44, 0x45, 0x4e, 0x59, 0x10, 0xb5, - 0x01, 0x12, 0x12, 0x0a, 0x0d, 0x56, 0x4c, 0x41, 0x4e, 0x5f, 0x46, 0x49, 0x4c, 0x54, 0x45, 0x52, - 0x45, 0x44, 0x10, 0xb6, 0x01, 0x12, 0x10, 0x0a, 0x0b, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, - 0x5f, 0x56, 0x4e, 0x49, 0x10, 0xb7, 0x01, 0x12, 0x16, 0x0a, 0x11, 0x49, 0x4e, 0x56, 0x41, 0x4c, - 0x49, 0x44, 0x5f, 0x54, 0x43, 0x5f, 0x42, 0x55, 0x46, 0x46, 0x45, 0x52, 0x10, 0xb8, 0x01, 0x12, - 0x0b, 0x0a, 0x06, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x10, 0xb9, 0x01, 0x12, 0x17, 0x0a, 0x12, - 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4e, 0x47, 0x5f, 0x53, 0x52, 0x56, 0x36, 0x5f, 0x53, 0x54, 0x41, - 0x54, 0x45, 0x10, 0xba, 0x01, 0x12, 0x0a, 0x0a, 0x05, 0x4e, 0x41, 0x54, 
0x34, 0x36, 0x10, 0xbb, - 0x01, 0x12, 0x0a, 0x0a, 0x05, 0x4e, 0x41, 0x54, 0x36, 0x34, 0x10, 0xbc, 0x01, 0x12, 0x12, 0x0a, - 0x0d, 0x41, 0x55, 0x54, 0x48, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0xbd, - 0x01, 0x12, 0x14, 0x0a, 0x0f, 0x43, 0x54, 0x5f, 0x4e, 0x4f, 0x5f, 0x4d, 0x41, 0x50, 0x5f, 0x46, - 0x4f, 0x55, 0x4e, 0x44, 0x10, 0xbe, 0x01, 0x12, 0x16, 0x0a, 0x11, 0x53, 0x4e, 0x41, 0x54, 0x5f, - 0x4e, 0x4f, 0x5f, 0x4d, 0x41, 0x50, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0xbf, 0x01, 0x12, - 0x17, 0x0a, 0x12, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x43, 0x4c, 0x55, 0x53, 0x54, - 0x45, 0x52, 0x5f, 0x49, 0x44, 0x10, 0xc0, 0x01, 0x2a, 0x4a, 0x0a, 0x10, 0x54, 0x72, 0x61, 0x66, - 0x66, 0x69, 0x63, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x19, - 0x54, 0x52, 0x41, 0x46, 0x46, 0x49, 0x43, 0x5f, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x49, 0x4f, - 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x49, - 0x4e, 0x47, 0x52, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x45, 0x47, 0x52, 0x45, - 0x53, 0x53, 0x10, 0x02, 0x2a, 0x8d, 0x02, 0x0a, 0x11, 0x44, 0x65, 0x62, 0x75, 0x67, 0x43, 0x61, - 0x70, 0x74, 0x75, 0x72, 0x65, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x19, 0x44, 0x42, - 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, - 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x18, 0x0a, 0x14, 0x44, 0x42, 0x47, - 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x44, 0x45, 0x4c, 0x49, 0x56, 0x45, 0x52, - 0x59, 0x10, 0x04, 0x12, 0x17, 0x0a, 0x13, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, - 0x52, 0x45, 0x5f, 0x46, 0x52, 0x4f, 0x4d, 0x5f, 0x4c, 0x42, 0x10, 0x05, 0x12, 0x19, 0x0a, 0x15, - 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x41, 0x46, 0x54, 0x45, - 0x52, 0x5f, 0x56, 0x34, 0x36, 0x10, 0x06, 0x12, 0x19, 0x0a, 0x15, 0x44, 0x42, 0x47, 0x5f, 0x43, - 0x41, 0x50, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x41, 0x46, 0x54, 0x45, 0x52, 0x5f, 0x56, 0x36, 0x34, - 0x10, 0x07, 0x12, 0x19, 0x0a, 0x15, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, 0x52, - 0x45, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x50, 0x52, 0x45, 0x10, 0x08, 0x12, 0x1a, 0x0a, - 0x16, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x50, 0x52, 0x4f, - 0x58, 0x59, 0x5f, 0x50, 0x4f, 0x53, 0x54, 0x10, 0x09, 0x12, 0x18, 0x0a, 0x14, 0x44, 0x42, 0x47, - 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x53, 0x4e, 0x41, 0x54, 0x5f, 0x50, 0x52, - 0x45, 0x10, 0x0a, 0x12, 0x19, 0x0a, 0x15, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, - 0x52, 0x45, 0x5f, 0x53, 0x4e, 0x41, 0x54, 0x5f, 0x50, 0x4f, 0x53, 0x54, 0x10, 0x0b, 0x22, 0x04, - 0x08, 0x01, 0x10, 0x03, 0x2a, 0x39, 0x0a, 0x09, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0f, - 0x0a, 0x0b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x10, 0x09, 0x12, - 0x0e, 0x0a, 0x0a, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x4c, 0x6f, 0x73, 0x74, 0x10, 0x02, 0x2a, - 0x7f, 0x0a, 0x0f, 0x4c, 0x6f, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x19, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x4c, 0x4f, - 0x53, 0x54, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, - 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x50, 0x45, 0x52, 0x46, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, - 
0x52, 0x49, 0x4e, 0x47, 0x5f, 0x42, 0x55, 0x46, 0x46, 0x45, 0x52, 0x10, 0x01, 0x12, 0x19, 0x0a, - 0x15, 0x4f, 0x42, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x53, - 0x5f, 0x51, 0x55, 0x45, 0x55, 0x45, 0x10, 0x02, 0x12, 0x16, 0x0a, 0x12, 0x48, 0x55, 0x42, 0x42, - 0x4c, 0x45, 0x5f, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x42, 0x55, 0x46, 0x46, 0x45, 0x52, 0x10, 0x03, - 0x2a, 0xae, 0x02, 0x0a, 0x0e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x47, 0x45, 0x4e, 0x54, 0x5f, 0x45, 0x56, 0x45, - 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, - 0x41, 0x47, 0x45, 0x4e, 0x54, 0x5f, 0x53, 0x54, 0x41, 0x52, 0x54, 0x45, 0x44, 0x10, 0x02, 0x12, - 0x12, 0x0a, 0x0e, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, - 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x44, 0x45, - 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x04, 0x12, 0x1f, 0x0a, 0x1b, 0x45, 0x4e, 0x44, 0x50, 0x4f, - 0x49, 0x4e, 0x54, 0x5f, 0x52, 0x45, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, 0x54, 0x45, 0x5f, 0x53, - 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x05, 0x12, 0x1f, 0x0a, 0x1b, 0x45, 0x4e, 0x44, 0x50, - 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x52, 0x45, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, 0x54, 0x45, 0x5f, - 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x06, 0x12, 0x14, 0x0a, 0x10, 0x45, 0x4e, 0x44, - 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x07, 0x12, - 0x14, 0x0a, 0x10, 0x45, 0x4e, 0x44, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x44, 0x45, 0x4c, 0x45, - 0x54, 0x45, 0x44, 0x10, 0x08, 0x12, 0x14, 0x0a, 0x10, 0x49, 0x50, 0x43, 0x41, 0x43, 0x48, 0x45, - 0x5f, 0x55, 0x50, 0x53, 0x45, 0x52, 0x54, 0x45, 0x44, 0x10, 0x09, 0x12, 0x13, 0x0a, 0x0f, 0x49, - 0x50, 0x43, 0x41, 0x43, 0x48, 0x45, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x0a, - 0x12, 0x14, 0x0a, 0x10, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x55, 0x50, 0x53, 0x45, - 0x52, 0x54, 0x45, 0x44, 0x10, 0x0b, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, - 0x45, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x0c, 0x22, 0x04, 0x08, 0x01, 0x10, - 0x01, 0x2a, 0xd8, 0x01, 0x0a, 0x16, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x72, 0x61, 0x6e, - 0x73, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x18, - 0x53, 0x4f, 0x43, 0x4b, 0x5f, 0x58, 0x4c, 0x41, 0x54, 0x45, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, - 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x26, 0x0a, 0x22, 0x53, 0x4f, - 0x43, 0x4b, 0x5f, 0x58, 0x4c, 0x41, 0x54, 0x45, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x50, - 0x52, 0x45, 0x5f, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x46, 0x57, 0x44, - 0x10, 0x01, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x4f, 0x43, 0x4b, 0x5f, 0x58, 0x4c, 0x41, 0x54, 0x45, - 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x50, 0x4f, 0x53, 0x54, 0x5f, 0x44, 0x49, 0x52, 0x45, - 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x46, 0x57, 0x44, 0x10, 0x02, 0x12, 0x26, 0x0a, 0x22, 0x53, + 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x93, + 0x01, 0x0a, 0x1a, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, + 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, + 0x06, 0x6c, 0x61, 0x62, 
0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x70, + 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, + 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x22, 0x99, 0x02, 0x0a, 0x13, 0x49, 0x50, 0x43, 0x61, 0x63, 0x68, 0x65, + 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, + 0x63, 0x69, 0x64, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x69, 0x64, 0x72, + 0x12, 0x1a, 0x0a, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x3f, 0x0a, 0x0c, + 0x6f, 0x6c, 0x64, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x52, 0x0b, 0x6f, 0x6c, 0x64, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x17, 0x0a, + 0x07, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x69, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x68, 0x6f, 0x73, 0x74, 0x49, 0x70, 0x12, 0x1e, 0x0a, 0x0b, 0x6f, 0x6c, 0x64, 0x5f, 0x68, 0x6f, + 0x73, 0x74, 0x5f, 0x69, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6f, 0x6c, 0x64, + 0x48, 0x6f, 0x73, 0x74, 0x49, 0x70, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, + 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x65, 0x6e, 0x63, + 0x72, 0x79, 0x70, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x70, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, + 0x22, 0x43, 0x0a, 0x1d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x73, 0x65, 0x72, + 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, 0x64, + 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, + 0x70, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x04, 0x70, 0x6f, 0x72, 0x74, 0x22, 0x9a, 0x03, 0x0a, 0x19, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x02, 0x69, 0x64, 0x12, 0x4e, 0x0a, 0x10, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x5f, + 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, + 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x73, 0x65, + 0x72, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x64, + 0x64, 0x72, 0x52, 0x0f, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x12, 0x50, 0x0a, 0x11, 0x62, 
0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x61, + 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, + 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x55, 0x70, 0x73, + 0x65, 0x72, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x41, + 0x64, 0x64, 0x72, 0x52, 0x10, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x41, 0x64, 0x64, 0x72, + 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x0e, 0x74, 0x72, 0x61, + 0x66, 0x66, 0x69, 0x63, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x65, 0x78, 0x74, 0x5f, 0x74, 0x72, + 0x61, 0x66, 0x66, 0x69, 0x63, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x10, 0x65, 0x78, 0x74, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2c, 0x0a, 0x12, 0x69, 0x6e, 0x74, 0x5f, 0x74, 0x72, 0x61, 0x66, + 0x66, 0x69, 0x63, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x10, 0x69, 0x6e, 0x74, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x22, 0x2b, 0x0a, 0x19, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x02, 0x69, 0x64, 0x22, + 0x3c, 0x0a, 0x10, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x66, + 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xef, 0x02, + 0x0a, 0x0a, 0x44, 0x65, 0x62, 0x75, 0x67, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x66, 0x6c, 0x6f, + 0x77, 0x2e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, + 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x45, 0x6e, + 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x30, + 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, + 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, + 0x12, 0x30, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x31, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 
0x52, 0x04, 0x61, 0x72, + 0x67, 0x31, 0x12, 0x30, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x32, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x04, + 0x61, 0x72, 0x67, 0x32, 0x12, 0x30, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x33, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x52, 0x04, 0x61, 0x72, 0x67, 0x33, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x12, 0x2d, 0x0a, 0x03, 0x63, 0x70, 0x75, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x03, 0x63, 0x70, 0x75, 0x2a, + 0x39, 0x0a, 0x08, 0x46, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x0c, 0x55, + 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x10, 0x00, 0x12, 0x09, 0x0a, + 0x05, 0x4c, 0x33, 0x5f, 0x4c, 0x34, 0x10, 0x01, 0x12, 0x06, 0x0a, 0x02, 0x4c, 0x37, 0x10, 0x02, + 0x12, 0x08, 0x0a, 0x04, 0x53, 0x4f, 0x43, 0x4b, 0x10, 0x03, 0x2a, 0x39, 0x0a, 0x08, 0x41, 0x75, + 0x74, 0x68, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x49, 0x53, 0x41, 0x42, 0x4c, + 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x49, 0x52, 0x45, 0x10, 0x01, 0x12, + 0x14, 0x0a, 0x10, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x41, 0x4c, 0x57, 0x41, 0x59, 0x53, 0x5f, 0x46, + 0x41, 0x49, 0x4c, 0x10, 0x02, 0x2a, 0xea, 0x01, 0x0a, 0x15, 0x54, 0x72, 0x61, 0x63, 0x65, 0x4f, + 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, + 0x11, 0x0a, 0x0d, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, + 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x54, 0x4f, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x10, 0x01, + 0x12, 0x0b, 0x0a, 0x07, 0x54, 0x4f, 0x5f, 0x48, 0x4f, 0x53, 0x54, 0x10, 0x02, 0x12, 0x0c, 0x0a, + 0x08, 0x54, 0x4f, 0x5f, 0x53, 0x54, 0x41, 0x43, 0x4b, 0x10, 0x03, 0x12, 0x0e, 0x0a, 0x0a, 0x54, + 0x4f, 0x5f, 0x4f, 0x56, 0x45, 0x52, 0x4c, 0x41, 0x59, 0x10, 0x04, 0x12, 0x0f, 0x0a, 0x0b, 0x54, + 0x4f, 0x5f, 0x45, 0x4e, 0x44, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x10, 0x65, 0x12, 0x11, 0x0a, 0x0d, + 0x46, 0x52, 0x4f, 0x4d, 0x5f, 0x45, 0x4e, 0x44, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x10, 0x05, 0x12, + 0x0e, 0x0a, 0x0a, 0x46, 0x52, 0x4f, 0x4d, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x10, 0x06, 0x12, + 0x0d, 0x0a, 0x09, 0x46, 0x52, 0x4f, 0x4d, 0x5f, 0x48, 0x4f, 0x53, 0x54, 0x10, 0x07, 0x12, 0x0e, + 0x0a, 0x0a, 0x46, 0x52, 0x4f, 0x4d, 0x5f, 0x53, 0x54, 0x41, 0x43, 0x4b, 0x10, 0x08, 0x12, 0x10, + 0x0a, 0x0c, 0x46, 0x52, 0x4f, 0x4d, 0x5f, 0x4f, 0x56, 0x45, 0x52, 0x4c, 0x41, 0x59, 0x10, 0x09, + 0x12, 0x10, 0x0a, 0x0c, 0x46, 0x52, 0x4f, 0x4d, 0x5f, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, + 0x10, 0x0a, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x4f, 0x5f, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, + 0x10, 0x0b, 0x2a, 0x48, 0x0a, 0x0a, 0x4c, 0x37, 0x46, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x13, 0x0a, 0x0f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x4c, 0x37, 0x5f, 0x54, + 0x59, 0x50, 0x45, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, + 
0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x45, 0x53, 0x50, 0x4f, 0x4e, 0x53, 0x45, 0x10, 0x02, + 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x41, 0x4d, 0x50, 0x4c, 0x45, 0x10, 0x03, 0x2a, 0x30, 0x0a, 0x09, + 0x49, 0x50, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x0a, 0x0b, 0x49, 0x50, 0x5f, + 0x4e, 0x4f, 0x54, 0x5f, 0x55, 0x53, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x50, + 0x76, 0x34, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x50, 0x76, 0x36, 0x10, 0x02, 0x2a, 0x7c, + 0x0a, 0x07, 0x56, 0x65, 0x72, 0x64, 0x69, 0x63, 0x74, 0x12, 0x13, 0x0a, 0x0f, 0x56, 0x45, 0x52, + 0x44, 0x49, 0x43, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0d, + 0x0a, 0x09, 0x46, 0x4f, 0x52, 0x57, 0x41, 0x52, 0x44, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0b, 0x0a, + 0x07, 0x44, 0x52, 0x4f, 0x50, 0x50, 0x45, 0x44, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, + 0x52, 0x4f, 0x52, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x55, 0x44, 0x49, 0x54, 0x10, 0x04, + 0x12, 0x0e, 0x0a, 0x0a, 0x52, 0x45, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x45, 0x44, 0x10, 0x05, + 0x12, 0x0a, 0x0a, 0x06, 0x54, 0x52, 0x41, 0x43, 0x45, 0x44, 0x10, 0x06, 0x12, 0x0e, 0x0a, 0x0a, + 0x54, 0x52, 0x41, 0x4e, 0x53, 0x4c, 0x41, 0x54, 0x45, 0x44, 0x10, 0x07, 0x2a, 0xb2, 0x0f, 0x0a, + 0x0a, 0x44, 0x72, 0x6f, 0x70, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x13, 0x44, + 0x52, 0x4f, 0x50, 0x5f, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, + 0x57, 0x4e, 0x10, 0x00, 0x12, 0x17, 0x0a, 0x12, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, + 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x4d, 0x41, 0x43, 0x10, 0x82, 0x01, 0x12, 0x1c, 0x0a, + 0x17, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, 0x41, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x41, 0x43, 0x10, 0x83, 0x01, 0x12, 0x16, 0x0a, 0x11, 0x49, + 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x49, 0x50, + 0x10, 0x84, 0x01, 0x12, 0x12, 0x0a, 0x0d, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x44, 0x45, + 0x4e, 0x49, 0x45, 0x44, 0x10, 0x85, 0x01, 0x12, 0x1b, 0x0a, 0x16, 0x49, 0x4e, 0x56, 0x41, 0x4c, + 0x49, 0x44, 0x5f, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x44, 0x52, 0x4f, 0x50, 0x50, 0x45, + 0x44, 0x10, 0x86, 0x01, 0x12, 0x23, 0x0a, 0x1e, 0x43, 0x54, 0x5f, 0x54, 0x52, 0x55, 0x4e, 0x43, + 0x41, 0x54, 0x45, 0x44, 0x5f, 0x4f, 0x52, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, + 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x87, 0x01, 0x12, 0x1c, 0x0a, 0x17, 0x43, 0x54, 0x5f, + 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4e, 0x47, 0x5f, 0x54, 0x43, 0x50, 0x5f, 0x41, 0x43, 0x4b, 0x5f, + 0x46, 0x4c, 0x41, 0x47, 0x10, 0x88, 0x01, 0x12, 0x1b, 0x0a, 0x16, 0x43, 0x54, 0x5f, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x4c, 0x34, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, + 0x4c, 0x10, 0x89, 0x01, 0x12, 0x27, 0x0a, 0x22, 0x43, 0x54, 0x5f, 0x43, 0x41, 0x4e, 0x4e, 0x4f, + 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x5f, 0x46, + 0x52, 0x4f, 0x4d, 0x5f, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x8a, 0x01, 0x12, 0x1c, 0x0a, + 0x17, 0x55, 0x4e, 0x53, 0x55, 0x50, 0x50, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x5f, 0x4c, 0x33, 0x5f, + 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x10, 0x8b, 0x01, 0x12, 0x15, 0x0a, 0x10, 0x4d, + 0x49, 0x53, 0x53, 0x45, 0x44, 0x5f, 0x54, 0x41, 0x49, 0x4c, 0x5f, 0x43, 0x41, 0x4c, 0x4c, 0x10, + 0x8c, 0x01, 0x12, 0x1c, 0x0a, 0x17, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x57, 0x52, 0x49, 0x54, + 0x49, 0x4e, 0x47, 0x5f, 
0x54, 0x4f, 0x5f, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x8d, 0x01, + 0x12, 0x18, 0x0a, 0x13, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x4c, 0x34, 0x5f, 0x50, + 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x10, 0x8e, 0x01, 0x12, 0x18, 0x0a, 0x13, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x56, 0x34, 0x5f, 0x43, 0x4f, 0x44, + 0x45, 0x10, 0x8f, 0x01, 0x12, 0x18, 0x0a, 0x13, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, + 0x49, 0x43, 0x4d, 0x50, 0x56, 0x34, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x10, 0x90, 0x01, 0x12, 0x18, + 0x0a, 0x13, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x56, 0x36, + 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x10, 0x91, 0x01, 0x12, 0x18, 0x0a, 0x13, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x56, 0x36, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x10, + 0x92, 0x01, 0x12, 0x20, 0x0a, 0x1b, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x52, 0x45, 0x54, 0x52, + 0x49, 0x45, 0x56, 0x49, 0x4e, 0x47, 0x5f, 0x54, 0x55, 0x4e, 0x4e, 0x45, 0x4c, 0x5f, 0x4b, 0x45, + 0x59, 0x10, 0x93, 0x01, 0x12, 0x24, 0x0a, 0x1f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x52, 0x45, + 0x54, 0x52, 0x49, 0x45, 0x56, 0x49, 0x4e, 0x47, 0x5f, 0x54, 0x55, 0x4e, 0x4e, 0x45, 0x4c, 0x5f, + 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x53, 0x10, 0x94, 0x01, 0x12, 0x1a, 0x0a, 0x15, 0x49, 0x4e, + 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x56, 0x45, 0x5f, 0x4f, 0x50, 0x54, + 0x49, 0x4f, 0x4e, 0x10, 0x95, 0x01, 0x12, 0x1e, 0x0a, 0x19, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, + 0x4e, 0x5f, 0x4c, 0x33, 0x5f, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x41, 0x44, 0x44, 0x52, + 0x45, 0x53, 0x53, 0x10, 0x96, 0x01, 0x12, 0x1b, 0x0a, 0x16, 0x53, 0x54, 0x41, 0x4c, 0x45, 0x5f, + 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x52, 0x4f, 0x55, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x5f, 0x49, 0x50, + 0x10, 0x97, 0x01, 0x12, 0x26, 0x0a, 0x21, 0x4e, 0x4f, 0x5f, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x49, + 0x4e, 0x47, 0x5f, 0x4c, 0x4f, 0x43, 0x41, 0x4c, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, 0x4e, + 0x45, 0x52, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x98, 0x01, 0x12, 0x27, 0x0a, 0x22, 0x45, + 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x57, 0x48, 0x49, 0x4c, 0x45, 0x5f, 0x43, 0x4f, 0x52, 0x52, 0x45, + 0x43, 0x54, 0x49, 0x4e, 0x47, 0x5f, 0x4c, 0x33, 0x5f, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x53, 0x55, + 0x4d, 0x10, 0x99, 0x01, 0x12, 0x27, 0x0a, 0x22, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x57, 0x48, + 0x49, 0x4c, 0x45, 0x5f, 0x43, 0x4f, 0x52, 0x52, 0x45, 0x43, 0x54, 0x49, 0x4e, 0x47, 0x5f, 0x4c, + 0x34, 0x5f, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x53, 0x55, 0x4d, 0x10, 0x9a, 0x01, 0x12, 0x1c, 0x0a, + 0x17, 0x43, 0x54, 0x5f, 0x4d, 0x41, 0x50, 0x5f, 0x49, 0x4e, 0x53, 0x45, 0x52, 0x54, 0x49, 0x4f, + 0x4e, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x9b, 0x01, 0x12, 0x22, 0x0a, 0x1d, 0x49, + 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x49, 0x50, 0x56, 0x36, 0x5f, 0x45, 0x58, 0x54, 0x45, + 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x9c, 0x01, 0x12, + 0x23, 0x0a, 0x1e, 0x49, 0x50, 0x5f, 0x46, 0x52, 0x41, 0x47, 0x4d, 0x45, 0x4e, 0x54, 0x41, 0x54, + 0x49, 0x4f, 0x4e, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x53, 0x55, 0x50, 0x50, 0x4f, 0x52, 0x54, 0x45, + 0x44, 0x10, 0x9d, 0x01, 0x12, 0x1e, 0x0a, 0x19, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, + 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, + 0x44, 0x10, 0x9e, 0x01, 0x12, 0x28, 0x0a, 0x23, 0x4e, 0x4f, 0x5f, 0x54, 0x55, 0x4e, 0x4e, 0x45, + 0x4c, 0x5f, 0x4f, 0x52, 0x5f, 0x45, 0x4e, 0x43, 
0x41, 0x50, 0x53, 0x55, 0x4c, 0x41, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x45, 0x4e, 0x44, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x10, 0xa0, 0x01, 0x12, 0x23, + 0x0a, 0x1e, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x5f, 0x54, 0x4f, 0x5f, 0x49, 0x4e, 0x53, 0x45, + 0x52, 0x54, 0x5f, 0x49, 0x4e, 0x54, 0x4f, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x4d, 0x41, 0x50, + 0x10, 0xa1, 0x01, 0x12, 0x2b, 0x0a, 0x26, 0x52, 0x45, 0x41, 0x43, 0x48, 0x45, 0x44, 0x5f, 0x45, + 0x44, 0x54, 0x5f, 0x52, 0x41, 0x54, 0x45, 0x5f, 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x49, 0x4e, 0x47, + 0x5f, 0x44, 0x52, 0x4f, 0x50, 0x5f, 0x48, 0x4f, 0x52, 0x49, 0x5a, 0x4f, 0x4e, 0x10, 0xa2, 0x01, + 0x12, 0x26, 0x0a, 0x21, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x43, 0x4f, 0x4e, 0x4e, + 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x52, 0x41, 0x43, 0x4b, 0x49, 0x4e, 0x47, 0x5f, + 0x53, 0x54, 0x41, 0x54, 0x45, 0x10, 0xa3, 0x01, 0x12, 0x1e, 0x0a, 0x19, 0x4c, 0x4f, 0x43, 0x41, + 0x4c, 0x5f, 0x48, 0x4f, 0x53, 0x54, 0x5f, 0x49, 0x53, 0x5f, 0x55, 0x4e, 0x52, 0x45, 0x41, 0x43, + 0x48, 0x41, 0x42, 0x4c, 0x45, 0x10, 0xa4, 0x01, 0x12, 0x3a, 0x0a, 0x35, 0x4e, 0x4f, 0x5f, 0x43, + 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x55, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x41, 0x56, 0x41, + 0x49, 0x4c, 0x41, 0x42, 0x4c, 0x45, 0x5f, 0x54, 0x4f, 0x5f, 0x50, 0x45, 0x52, 0x46, 0x4f, 0x52, + 0x4d, 0x5f, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x44, 0x45, 0x43, 0x49, 0x53, 0x49, 0x4f, + 0x4e, 0x10, 0xa5, 0x01, 0x12, 0x1c, 0x0a, 0x17, 0x55, 0x4e, 0x53, 0x55, 0x50, 0x50, 0x4f, 0x52, + 0x54, 0x45, 0x44, 0x5f, 0x4c, 0x32, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x10, + 0xa6, 0x01, 0x12, 0x22, 0x0a, 0x1d, 0x4e, 0x4f, 0x5f, 0x4d, 0x41, 0x50, 0x50, 0x49, 0x4e, 0x47, + 0x5f, 0x46, 0x4f, 0x52, 0x5f, 0x4e, 0x41, 0x54, 0x5f, 0x4d, 0x41, 0x53, 0x51, 0x55, 0x45, 0x52, + 0x41, 0x44, 0x45, 0x10, 0xa7, 0x01, 0x12, 0x2c, 0x0a, 0x27, 0x55, 0x4e, 0x53, 0x55, 0x50, 0x50, + 0x4f, 0x52, 0x54, 0x45, 0x44, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x46, + 0x4f, 0x52, 0x5f, 0x4e, 0x41, 0x54, 0x5f, 0x4d, 0x41, 0x53, 0x51, 0x55, 0x45, 0x52, 0x41, 0x44, + 0x45, 0x10, 0xa8, 0x01, 0x12, 0x16, 0x0a, 0x11, 0x46, 0x49, 0x42, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, + 0x55, 0x50, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0xa9, 0x01, 0x12, 0x28, 0x0a, 0x23, + 0x45, 0x4e, 0x43, 0x41, 0x50, 0x53, 0x55, 0x4c, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x52, + 0x41, 0x46, 0x46, 0x49, 0x43, 0x5f, 0x49, 0x53, 0x5f, 0x50, 0x52, 0x4f, 0x48, 0x49, 0x42, 0x49, + 0x54, 0x45, 0x44, 0x10, 0xaa, 0x01, 0x12, 0x15, 0x0a, 0x10, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, + 0x44, 0x5f, 0x49, 0x44, 0x45, 0x4e, 0x54, 0x49, 0x54, 0x59, 0x10, 0xab, 0x01, 0x12, 0x13, 0x0a, + 0x0e, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x53, 0x45, 0x4e, 0x44, 0x45, 0x52, 0x10, + 0xac, 0x01, 0x12, 0x13, 0x0a, 0x0e, 0x4e, 0x41, 0x54, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x4e, 0x45, + 0x45, 0x44, 0x45, 0x44, 0x10, 0xad, 0x01, 0x12, 0x13, 0x0a, 0x0e, 0x49, 0x53, 0x5f, 0x41, 0x5f, + 0x43, 0x4c, 0x55, 0x53, 0x54, 0x45, 0x52, 0x49, 0x50, 0x10, 0xae, 0x01, 0x12, 0x2e, 0x0a, 0x29, + 0x46, 0x49, 0x52, 0x53, 0x54, 0x5f, 0x4c, 0x4f, 0x47, 0x49, 0x43, 0x41, 0x4c, 0x5f, 0x44, 0x41, + 0x54, 0x41, 0x47, 0x52, 0x41, 0x4d, 0x5f, 0x46, 0x52, 0x41, 0x47, 0x4d, 0x45, 0x4e, 0x54, 0x5f, + 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0xaf, 0x01, 0x12, 0x1d, 0x0a, 0x18, + 0x46, 0x4f, 0x52, 0x42, 0x49, 0x44, 0x44, 0x45, 0x4e, 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x56, 0x36, + 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0xb0, 0x01, 0x12, 
0x21, 0x0a, 0x1c, 0x44, + 0x45, 0x4e, 0x49, 0x45, 0x44, 0x5f, 0x42, 0x59, 0x5f, 0x4c, 0x42, 0x5f, 0x53, 0x52, 0x43, 0x5f, + 0x52, 0x41, 0x4e, 0x47, 0x45, 0x5f, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x10, 0xb1, 0x01, 0x12, 0x19, + 0x0a, 0x14, 0x53, 0x4f, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, + 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0xb2, 0x01, 0x12, 0x19, 0x0a, 0x14, 0x53, 0x4f, 0x43, + 0x4b, 0x45, 0x54, 0x5f, 0x41, 0x53, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, + 0x44, 0x10, 0xb3, 0x01, 0x12, 0x31, 0x0a, 0x2c, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x52, 0x45, + 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x53, 0x55, + 0x50, 0x50, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x4f, 0x52, 0x5f, 0x50, 0x52, 0x4f, 0x54, + 0x4f, 0x43, 0x4f, 0x4c, 0x10, 0xb4, 0x01, 0x12, 0x10, 0x0a, 0x0b, 0x50, 0x4f, 0x4c, 0x49, 0x43, + 0x59, 0x5f, 0x44, 0x45, 0x4e, 0x59, 0x10, 0xb5, 0x01, 0x12, 0x12, 0x0a, 0x0d, 0x56, 0x4c, 0x41, + 0x4e, 0x5f, 0x46, 0x49, 0x4c, 0x54, 0x45, 0x52, 0x45, 0x44, 0x10, 0xb6, 0x01, 0x12, 0x10, 0x0a, + 0x0b, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x56, 0x4e, 0x49, 0x10, 0xb7, 0x01, 0x12, + 0x16, 0x0a, 0x11, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x54, 0x43, 0x5f, 0x42, 0x55, + 0x46, 0x46, 0x45, 0x52, 0x10, 0xb8, 0x01, 0x12, 0x0b, 0x0a, 0x06, 0x4e, 0x4f, 0x5f, 0x53, 0x49, + 0x44, 0x10, 0xb9, 0x01, 0x12, 0x17, 0x0a, 0x12, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4e, 0x47, 0x5f, + 0x53, 0x52, 0x56, 0x36, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x10, 0xba, 0x01, 0x12, 0x0a, 0x0a, + 0x05, 0x4e, 0x41, 0x54, 0x34, 0x36, 0x10, 0xbb, 0x01, 0x12, 0x0a, 0x0a, 0x05, 0x4e, 0x41, 0x54, + 0x36, 0x34, 0x10, 0xbc, 0x01, 0x12, 0x12, 0x0a, 0x0d, 0x41, 0x55, 0x54, 0x48, 0x5f, 0x52, 0x45, + 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0xbd, 0x01, 0x12, 0x14, 0x0a, 0x0f, 0x43, 0x54, 0x5f, + 0x4e, 0x4f, 0x5f, 0x4d, 0x41, 0x50, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0xbe, 0x01, 0x12, + 0x16, 0x0a, 0x11, 0x53, 0x4e, 0x41, 0x54, 0x5f, 0x4e, 0x4f, 0x5f, 0x4d, 0x41, 0x50, 0x5f, 0x46, + 0x4f, 0x55, 0x4e, 0x44, 0x10, 0xbf, 0x01, 0x12, 0x17, 0x0a, 0x12, 0x49, 0x4e, 0x56, 0x41, 0x4c, + 0x49, 0x44, 0x5f, 0x43, 0x4c, 0x55, 0x53, 0x54, 0x45, 0x52, 0x5f, 0x49, 0x44, 0x10, 0xc0, 0x01, + 0x12, 0x27, 0x0a, 0x22, 0x55, 0x4e, 0x53, 0x55, 0x50, 0x50, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x5f, + 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x46, 0x4f, 0x52, 0x5f, 0x44, 0x53, 0x52, + 0x5f, 0x45, 0x4e, 0x43, 0x41, 0x50, 0x10, 0xc1, 0x01, 0x12, 0x16, 0x0a, 0x11, 0x4e, 0x4f, 0x5f, + 0x45, 0x47, 0x52, 0x45, 0x53, 0x53, 0x5f, 0x47, 0x41, 0x54, 0x45, 0x57, 0x41, 0x59, 0x10, 0xc2, + 0x01, 0x2a, 0x4a, 0x0a, 0x10, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x44, 0x69, 0x72, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x19, 0x54, 0x52, 0x41, 0x46, 0x46, 0x49, 0x43, + 0x5f, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, + 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x47, 0x52, 0x45, 0x53, 0x53, 0x10, + 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x45, 0x47, 0x52, 0x45, 0x53, 0x53, 0x10, 0x02, 0x2a, 0x8d, 0x02, + 0x0a, 0x11, 0x44, 0x65, 0x62, 0x75, 0x67, 0x43, 0x61, 0x70, 0x74, 0x75, 0x72, 0x65, 0x50, 0x6f, + 0x69, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x19, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, + 0x52, 0x45, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, + 0x10, 0x00, 0x12, 0x18, 0x0a, 0x14, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, 0x52, + 
0x45, 0x5f, 0x44, 0x45, 0x4c, 0x49, 0x56, 0x45, 0x52, 0x59, 0x10, 0x04, 0x12, 0x17, 0x0a, 0x13, + 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x46, 0x52, 0x4f, 0x4d, + 0x5f, 0x4c, 0x42, 0x10, 0x05, 0x12, 0x19, 0x0a, 0x15, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, + 0x54, 0x55, 0x52, 0x45, 0x5f, 0x41, 0x46, 0x54, 0x45, 0x52, 0x5f, 0x56, 0x34, 0x36, 0x10, 0x06, + 0x12, 0x19, 0x0a, 0x15, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, 0x52, 0x45, 0x5f, + 0x41, 0x46, 0x54, 0x45, 0x52, 0x5f, 0x56, 0x36, 0x34, 0x10, 0x07, 0x12, 0x19, 0x0a, 0x15, 0x44, + 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, + 0x5f, 0x50, 0x52, 0x45, 0x10, 0x08, 0x12, 0x1a, 0x0a, 0x16, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, + 0x50, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x50, 0x4f, 0x53, 0x54, + 0x10, 0x09, 0x12, 0x18, 0x0a, 0x14, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, 0x52, + 0x45, 0x5f, 0x53, 0x4e, 0x41, 0x54, 0x5f, 0x50, 0x52, 0x45, 0x10, 0x0a, 0x12, 0x19, 0x0a, 0x15, + 0x44, 0x42, 0x47, 0x5f, 0x43, 0x41, 0x50, 0x54, 0x55, 0x52, 0x45, 0x5f, 0x53, 0x4e, 0x41, 0x54, + 0x5f, 0x50, 0x4f, 0x53, 0x54, 0x10, 0x0b, 0x22, 0x04, 0x08, 0x01, 0x10, 0x03, 0x2a, 0x39, 0x0a, + 0x09, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x45, 0x76, 0x65, 0x6e, 0x74, + 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x52, 0x65, 0x63, 0x6f, + 0x72, 0x64, 0x4c, 0x6f, 0x73, 0x74, 0x10, 0x02, 0x2a, 0x7f, 0x0a, 0x0f, 0x4c, 0x6f, 0x73, 0x74, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x19, 0x55, + 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x4c, 0x4f, 0x53, 0x54, 0x5f, 0x45, 0x56, 0x45, 0x4e, + 0x54, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x50, 0x45, + 0x52, 0x46, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x42, 0x55, + 0x46, 0x46, 0x45, 0x52, 0x10, 0x01, 0x12, 0x19, 0x0a, 0x15, 0x4f, 0x42, 0x53, 0x45, 0x52, 0x56, + 0x45, 0x52, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x53, 0x5f, 0x51, 0x55, 0x45, 0x55, 0x45, 0x10, + 0x02, 0x12, 0x16, 0x0a, 0x12, 0x48, 0x55, 0x42, 0x42, 0x4c, 0x45, 0x5f, 0x52, 0x49, 0x4e, 0x47, + 0x5f, 0x42, 0x55, 0x46, 0x46, 0x45, 0x52, 0x10, 0x03, 0x2a, 0xae, 0x02, 0x0a, 0x0e, 0x41, 0x67, + 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, + 0x41, 0x47, 0x45, 0x4e, 0x54, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x41, 0x47, 0x45, 0x4e, 0x54, 0x5f, 0x53, + 0x54, 0x41, 0x52, 0x54, 0x45, 0x44, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x50, 0x4f, 0x4c, 0x49, + 0x43, 0x59, 0x5f, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, + 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x04, + 0x12, 0x1f, 0x0a, 0x1b, 0x45, 0x4e, 0x44, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x52, 0x45, 0x47, + 0x45, 0x4e, 0x45, 0x52, 0x41, 0x54, 0x45, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, + 0x05, 0x12, 0x1f, 0x0a, 0x1b, 0x45, 0x4e, 0x44, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x52, 0x45, + 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, 0x54, 0x45, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, + 0x10, 0x06, 0x12, 0x14, 0x0a, 0x10, 0x45, 0x4e, 0x44, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x43, + 0x52, 0x45, 0x41, 0x54, 
0x45, 0x44, 0x10, 0x07, 0x12, 0x14, 0x0a, 0x10, 0x45, 0x4e, 0x44, 0x50, + 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x08, 0x12, 0x14, + 0x0a, 0x10, 0x49, 0x50, 0x43, 0x41, 0x43, 0x48, 0x45, 0x5f, 0x55, 0x50, 0x53, 0x45, 0x52, 0x54, + 0x45, 0x44, 0x10, 0x09, 0x12, 0x13, 0x0a, 0x0f, 0x49, 0x50, 0x43, 0x41, 0x43, 0x48, 0x45, 0x5f, + 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x0a, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x45, 0x52, + 0x56, 0x49, 0x43, 0x45, 0x5f, 0x55, 0x50, 0x53, 0x45, 0x52, 0x54, 0x45, 0x44, 0x10, 0x0b, 0x12, + 0x13, 0x0a, 0x0f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, + 0x45, 0x44, 0x10, 0x0c, 0x22, 0x04, 0x08, 0x01, 0x10, 0x01, 0x2a, 0xd8, 0x01, 0x0a, 0x16, 0x53, + 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x4f, 0x43, 0x4b, 0x5f, 0x58, 0x4c, + 0x41, 0x54, 0x45, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, + 0x4e, 0x10, 0x00, 0x12, 0x26, 0x0a, 0x22, 0x53, 0x4f, 0x43, 0x4b, 0x5f, 0x58, 0x4c, 0x41, 0x54, + 0x45, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x50, 0x52, 0x45, 0x5f, 0x44, 0x49, 0x52, 0x45, + 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x46, 0x57, 0x44, 0x10, 0x01, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x4f, 0x43, 0x4b, 0x5f, 0x58, 0x4c, 0x41, 0x54, 0x45, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, - 0x50, 0x52, 0x45, 0x5f, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x45, - 0x56, 0x10, 0x03, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x4f, 0x43, 0x4b, 0x5f, 0x58, 0x4c, 0x41, 0x54, - 0x45, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x50, 0x4f, 0x53, 0x54, 0x5f, 0x44, 0x49, 0x52, - 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x56, 0x10, 0x04, 0x2a, 0xdd, 0x0c, 0x0a, - 0x0e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x15, 0x0a, 0x11, 0x44, 0x42, 0x47, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x4b, - 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x42, 0x47, 0x5f, 0x47, 0x45, - 0x4e, 0x45, 0x52, 0x49, 0x43, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x44, 0x42, 0x47, 0x5f, 0x4c, - 0x4f, 0x43, 0x41, 0x4c, 0x5f, 0x44, 0x45, 0x4c, 0x49, 0x56, 0x45, 0x52, 0x59, 0x10, 0x02, 0x12, - 0x0d, 0x0a, 0x09, 0x44, 0x42, 0x47, 0x5f, 0x45, 0x4e, 0x43, 0x41, 0x50, 0x10, 0x03, 0x12, 0x11, - 0x0a, 0x0d, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x58, 0x43, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, - 0x04, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x42, 0x47, 0x5f, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, - 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x05, 0x12, 0x11, 0x0a, 0x0d, 0x44, 0x42, 0x47, 0x5f, - 0x43, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11, 0x44, - 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x52, 0x45, 0x56, - 0x10, 0x07, 0x12, 0x10, 0x0a, 0x0c, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4d, 0x41, 0x54, - 0x43, 0x48, 0x10, 0x08, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x43, - 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x09, 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x42, 0x47, 0x5f, - 0x43, 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x32, 0x10, 0x0a, 0x12, 0x14, 0x0a, - 0x10, 0x44, 0x42, 0x47, 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x36, 0x5f, 0x48, 0x41, 0x4e, 0x44, 0x4c, - 0x45, 0x10, 0x0b, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x42, 0x47, 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x36, - 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 
0x10, 0x0c, 0x12, 0x10, 0x0a, 0x0c, 0x44, 0x42, - 0x47, 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x36, 0x5f, 0x4e, 0x53, 0x10, 0x0d, 0x12, 0x1b, 0x0a, 0x17, - 0x44, 0x42, 0x47, 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x36, 0x5f, 0x54, 0x49, 0x4d, 0x45, 0x5f, 0x45, - 0x58, 0x43, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, 0x0e, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x42, 0x47, - 0x5f, 0x43, 0x54, 0x5f, 0x56, 0x45, 0x52, 0x44, 0x49, 0x43, 0x54, 0x10, 0x0f, 0x12, 0x0d, 0x0a, - 0x09, 0x44, 0x42, 0x47, 0x5f, 0x44, 0x45, 0x43, 0x41, 0x50, 0x10, 0x10, 0x12, 0x10, 0x0a, 0x0c, - 0x44, 0x42, 0x47, 0x5f, 0x50, 0x4f, 0x52, 0x54, 0x5f, 0x4d, 0x41, 0x50, 0x10, 0x11, 0x12, 0x11, - 0x0a, 0x0d, 0x44, 0x42, 0x47, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x52, 0x45, 0x54, 0x10, - 0x12, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x42, 0x47, 0x5f, 0x54, 0x4f, 0x5f, 0x48, 0x4f, 0x53, 0x54, - 0x10, 0x13, 0x12, 0x10, 0x0a, 0x0c, 0x44, 0x42, 0x47, 0x5f, 0x54, 0x4f, 0x5f, 0x53, 0x54, 0x41, - 0x43, 0x4b, 0x10, 0x14, 0x12, 0x10, 0x0a, 0x0c, 0x44, 0x42, 0x47, 0x5f, 0x50, 0x4b, 0x54, 0x5f, - 0x48, 0x41, 0x53, 0x48, 0x10, 0x15, 0x12, 0x1b, 0x0a, 0x17, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, - 0x36, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x46, 0x52, 0x4f, 0x4e, 0x54, 0x45, 0x4e, - 0x44, 0x10, 0x16, 0x12, 0x20, 0x0a, 0x1c, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36, 0x5f, 0x4c, - 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x46, 0x52, 0x4f, 0x4e, 0x54, 0x45, 0x4e, 0x44, 0x5f, 0x46, - 0x41, 0x49, 0x4c, 0x10, 0x17, 0x12, 0x1f, 0x0a, 0x1b, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36, - 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, - 0x53, 0x4c, 0x4f, 0x54, 0x10, 0x18, 0x12, 0x27, 0x0a, 0x23, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, - 0x36, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, - 0x5f, 0x53, 0x4c, 0x4f, 0x54, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x19, 0x12, + 0x50, 0x4f, 0x53, 0x54, 0x5f, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x46, + 0x57, 0x44, 0x10, 0x02, 0x12, 0x26, 0x0a, 0x22, 0x53, 0x4f, 0x43, 0x4b, 0x5f, 0x58, 0x4c, 0x41, + 0x54, 0x45, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x5f, 0x50, 0x52, 0x45, 0x5f, 0x44, 0x49, 0x52, + 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x56, 0x10, 0x03, 0x12, 0x27, 0x0a, 0x23, + 0x53, 0x4f, 0x43, 0x4b, 0x5f, 0x58, 0x4c, 0x41, 0x54, 0x45, 0x5f, 0x50, 0x4f, 0x49, 0x4e, 0x54, + 0x5f, 0x50, 0x4f, 0x53, 0x54, 0x5f, 0x44, 0x49, 0x52, 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, + 0x52, 0x45, 0x56, 0x10, 0x04, 0x2a, 0xdd, 0x0c, 0x0a, 0x0e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x42, 0x47, 0x5f, + 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x0f, 0x0a, 0x0b, 0x44, 0x42, 0x47, 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x49, 0x43, 0x10, 0x01, + 0x12, 0x16, 0x0a, 0x12, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x4f, 0x43, 0x41, 0x4c, 0x5f, 0x44, 0x45, + 0x4c, 0x49, 0x56, 0x45, 0x52, 0x59, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x42, 0x47, 0x5f, + 0x45, 0x4e, 0x43, 0x41, 0x50, 0x10, 0x03, 0x12, 0x11, 0x0a, 0x0d, 0x44, 0x42, 0x47, 0x5f, 0x4c, + 0x58, 0x43, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x42, + 0x47, 0x5f, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x44, 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, + 0x05, 0x12, 0x11, 0x0a, 0x0d, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, + 0x55, 0x50, 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x42, 0x47, 0x5f, 
0x43, 0x54, 0x5f, 0x4c, + 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x52, 0x45, 0x56, 0x10, 0x07, 0x12, 0x10, 0x0a, 0x0c, 0x44, + 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x08, 0x12, 0x12, 0x0a, + 0x0e, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, + 0x09, 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41, + 0x54, 0x45, 0x44, 0x32, 0x10, 0x0a, 0x12, 0x14, 0x0a, 0x10, 0x44, 0x42, 0x47, 0x5f, 0x49, 0x43, + 0x4d, 0x50, 0x36, 0x5f, 0x48, 0x41, 0x4e, 0x44, 0x4c, 0x45, 0x10, 0x0b, 0x12, 0x15, 0x0a, 0x11, + 0x44, 0x42, 0x47, 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x36, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, + 0x54, 0x10, 0x0c, 0x12, 0x10, 0x0a, 0x0c, 0x44, 0x42, 0x47, 0x5f, 0x49, 0x43, 0x4d, 0x50, 0x36, + 0x5f, 0x4e, 0x53, 0x10, 0x0d, 0x12, 0x1b, 0x0a, 0x17, 0x44, 0x42, 0x47, 0x5f, 0x49, 0x43, 0x4d, + 0x50, 0x36, 0x5f, 0x54, 0x49, 0x4d, 0x45, 0x5f, 0x45, 0x58, 0x43, 0x45, 0x45, 0x44, 0x45, 0x44, + 0x10, 0x0e, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x56, 0x45, 0x52, + 0x44, 0x49, 0x43, 0x54, 0x10, 0x0f, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x42, 0x47, 0x5f, 0x44, 0x45, + 0x43, 0x41, 0x50, 0x10, 0x10, 0x12, 0x10, 0x0a, 0x0c, 0x44, 0x42, 0x47, 0x5f, 0x50, 0x4f, 0x52, + 0x54, 0x5f, 0x4d, 0x41, 0x50, 0x10, 0x11, 0x12, 0x11, 0x0a, 0x0d, 0x44, 0x42, 0x47, 0x5f, 0x45, + 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x52, 0x45, 0x54, 0x10, 0x12, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x42, + 0x47, 0x5f, 0x54, 0x4f, 0x5f, 0x48, 0x4f, 0x53, 0x54, 0x10, 0x13, 0x12, 0x10, 0x0a, 0x0c, 0x44, + 0x42, 0x47, 0x5f, 0x54, 0x4f, 0x5f, 0x53, 0x54, 0x41, 0x43, 0x4b, 0x10, 0x14, 0x12, 0x10, 0x0a, + 0x0c, 0x44, 0x42, 0x47, 0x5f, 0x50, 0x4b, 0x54, 0x5f, 0x48, 0x41, 0x53, 0x48, 0x10, 0x15, 0x12, + 0x1b, 0x0a, 0x17, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, + 0x50, 0x5f, 0x46, 0x52, 0x4f, 0x4e, 0x54, 0x45, 0x4e, 0x44, 0x10, 0x16, 0x12, 0x20, 0x0a, 0x1c, + 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x46, + 0x52, 0x4f, 0x4e, 0x54, 0x45, 0x4e, 0x44, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x10, 0x17, 0x12, 0x1f, + 0x0a, 0x1b, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, + 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x53, 0x4c, 0x4f, 0x54, 0x10, 0x18, 0x12, 0x27, 0x0a, 0x23, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, - 0x50, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x53, 0x4c, 0x4f, 0x54, 0x5f, 0x56, - 0x32, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x10, 0x1a, 0x12, 0x1f, 0x0a, 0x1b, 0x44, 0x42, 0x47, 0x5f, + 0x50, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x53, 0x4c, 0x4f, 0x54, 0x5f, 0x53, + 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x19, 0x12, 0x27, 0x0a, 0x23, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, - 0x4e, 0x44, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x10, 0x1b, 0x12, 0x1e, 0x0a, 0x1a, 0x44, 0x42, 0x47, - 0x5f, 0x4c, 0x42, 0x36, 0x5f, 0x52, 0x45, 0x56, 0x45, 0x52, 0x53, 0x45, 0x5f, 0x4e, 0x41, 0x54, - 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x10, 0x1c, 0x12, 0x17, 0x0a, 0x13, 0x44, 0x42, 0x47, - 0x5f, 0x4c, 0x42, 0x36, 0x5f, 0x52, 0x45, 0x56, 0x45, 0x52, 0x53, 0x45, 0x5f, 0x4e, 0x41, 0x54, - 0x10, 0x1d, 0x12, 0x1b, 0x0a, 0x17, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, - 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x46, 0x52, 0x4f, 0x4e, 0x54, 0x45, 0x4e, 0x44, 0x10, 0x1e, 0x12, - 
0x20, 0x0a, 0x1c, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, - 0x50, 0x5f, 0x46, 0x52, 0x4f, 0x4e, 0x54, 0x45, 0x4e, 0x44, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x10, - 0x1f, 0x12, 0x1f, 0x0a, 0x1b, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, - 0x4b, 0x55, 0x50, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x53, 0x4c, 0x4f, 0x54, - 0x10, 0x20, 0x12, 0x27, 0x0a, 0x23, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, - 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x53, 0x4c, 0x4f, - 0x54, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x21, 0x12, 0x27, 0x0a, 0x23, 0x44, + 0x4e, 0x44, 0x5f, 0x53, 0x4c, 0x4f, 0x54, 0x5f, 0x56, 0x32, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x10, + 0x1a, 0x12, 0x1f, 0x0a, 0x1b, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36, 0x5f, 0x4c, 0x4f, 0x4f, + 0x4b, 0x55, 0x50, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x46, 0x41, 0x49, 0x4c, + 0x10, 0x1b, 0x12, 0x1e, 0x0a, 0x1a, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36, 0x5f, 0x52, 0x45, + 0x56, 0x45, 0x52, 0x53, 0x45, 0x5f, 0x4e, 0x41, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, + 0x10, 0x1c, 0x12, 0x17, 0x0a, 0x13, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x36, 0x5f, 0x52, 0x45, + 0x56, 0x45, 0x52, 0x53, 0x45, 0x5f, 0x4e, 0x41, 0x54, 0x10, 0x1d, 0x12, 0x1b, 0x0a, 0x17, 0x44, + 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x46, 0x52, + 0x4f, 0x4e, 0x54, 0x45, 0x4e, 0x44, 0x10, 0x1e, 0x12, 0x20, 0x0a, 0x1c, 0x44, 0x42, 0x47, 0x5f, + 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x46, 0x52, 0x4f, 0x4e, 0x54, + 0x45, 0x4e, 0x44, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x10, 0x1f, 0x12, 0x1f, 0x0a, 0x1b, 0x44, 0x42, + 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x42, 0x41, 0x43, + 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x53, 0x4c, 0x4f, 0x54, 0x10, 0x20, 0x12, 0x27, 0x0a, 0x23, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x42, 0x41, - 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x53, 0x4c, 0x4f, 0x54, 0x5f, 0x56, 0x32, 0x5f, 0x46, 0x41, - 0x49, 0x4c, 0x10, 0x22, 0x12, 0x1f, 0x0a, 0x1b, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, - 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x46, - 0x41, 0x49, 0x4c, 0x10, 0x23, 0x12, 0x1e, 0x0a, 0x1a, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, - 0x5f, 0x52, 0x45, 0x56, 0x45, 0x52, 0x53, 0x45, 0x5f, 0x4e, 0x41, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, - 0x4b, 0x55, 0x50, 0x10, 0x24, 0x12, 0x17, 0x0a, 0x13, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, - 0x5f, 0x52, 0x45, 0x56, 0x45, 0x52, 0x53, 0x45, 0x5f, 0x4e, 0x41, 0x54, 0x10, 0x25, 0x12, 0x19, - 0x0a, 0x15, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, 0x50, 0x42, 0x41, - 0x43, 0x4b, 0x5f, 0x53, 0x4e, 0x41, 0x54, 0x10, 0x26, 0x12, 0x1d, 0x0a, 0x19, 0x44, 0x42, 0x47, - 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, 0x50, 0x42, 0x41, 0x43, 0x4b, 0x5f, 0x53, 0x4e, - 0x41, 0x54, 0x5f, 0x52, 0x45, 0x56, 0x10, 0x27, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x42, 0x47, 0x5f, - 0x43, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x34, 0x10, 0x28, 0x12, 0x1b, 0x0a, 0x17, - 0x44, 0x42, 0x47, 0x5f, 0x52, 0x52, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x53, - 0x4c, 0x4f, 0x54, 0x5f, 0x53, 0x45, 0x4c, 0x10, 0x29, 0x12, 0x18, 0x0a, 0x14, 0x44, 0x42, 0x47, - 0x5f, 0x52, 0x45, 0x56, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, - 0x50, 0x10, 0x2a, 0x12, 
0x17, 0x0a, 0x13, 0x44, 0x42, 0x47, 0x5f, 0x52, 0x45, 0x56, 0x5f, 0x50, - 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x2b, 0x12, 0x18, 0x0a, 0x14, - 0x44, 0x42, 0x47, 0x5f, 0x52, 0x45, 0x56, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x55, 0x50, - 0x44, 0x41, 0x54, 0x45, 0x10, 0x2c, 0x12, 0x11, 0x0a, 0x0d, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x34, - 0x5f, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x10, 0x2d, 0x12, 0x19, 0x0a, 0x15, 0x44, 0x42, 0x47, - 0x5f, 0x4e, 0x45, 0x54, 0x44, 0x45, 0x56, 0x5f, 0x49, 0x4e, 0x5f, 0x43, 0x4c, 0x55, 0x53, 0x54, - 0x45, 0x52, 0x10, 0x2e, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x42, 0x47, 0x5f, 0x4e, 0x45, 0x54, 0x44, - 0x45, 0x56, 0x5f, 0x45, 0x4e, 0x43, 0x41, 0x50, 0x34, 0x10, 0x2f, 0x12, 0x14, 0x0a, 0x10, 0x44, - 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x34, 0x5f, 0x31, 0x10, - 0x30, 0x12, 0x14, 0x0a, 0x10, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, - 0x55, 0x50, 0x34, 0x5f, 0x32, 0x10, 0x31, 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x42, 0x47, 0x5f, 0x43, - 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x34, 0x10, 0x32, 0x12, 0x14, 0x0a, 0x10, - 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x36, 0x5f, 0x31, - 0x10, 0x33, 0x12, 0x14, 0x0a, 0x10, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, - 0x4b, 0x55, 0x50, 0x36, 0x5f, 0x32, 0x10, 0x34, 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x42, 0x47, 0x5f, - 0x43, 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x36, 0x10, 0x35, 0x12, 0x12, 0x0a, - 0x0e, 0x44, 0x42, 0x47, 0x5f, 0x53, 0x4b, 0x49, 0x50, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x10, - 0x36, 0x12, 0x11, 0x0a, 0x0d, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x34, 0x5f, 0x43, 0x52, 0x45, 0x41, - 0x54, 0x45, 0x10, 0x37, 0x12, 0x19, 0x0a, 0x15, 0x44, 0x42, 0x47, 0x5f, 0x49, 0x50, 0x5f, 0x49, - 0x44, 0x5f, 0x4d, 0x41, 0x50, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x34, 0x10, 0x38, 0x12, - 0x19, 0x0a, 0x15, 0x44, 0x42, 0x47, 0x5f, 0x49, 0x50, 0x5f, 0x49, 0x44, 0x5f, 0x4d, 0x41, 0x50, - 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x36, 0x10, 0x39, 0x12, 0x1a, 0x0a, 0x16, 0x44, 0x42, - 0x47, 0x5f, 0x49, 0x50, 0x5f, 0x49, 0x44, 0x5f, 0x4d, 0x41, 0x50, 0x5f, 0x53, 0x55, 0x43, 0x43, - 0x45, 0x45, 0x44, 0x34, 0x10, 0x3a, 0x12, 0x1a, 0x0a, 0x16, 0x44, 0x42, 0x47, 0x5f, 0x49, 0x50, - 0x5f, 0x49, 0x44, 0x5f, 0x4d, 0x41, 0x50, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, 0x45, 0x44, 0x36, - 0x10, 0x3b, 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x5f, 0x53, 0x54, 0x41, - 0x4c, 0x45, 0x5f, 0x43, 0x54, 0x10, 0x3c, 0x12, 0x18, 0x0a, 0x14, 0x44, 0x42, 0x47, 0x5f, 0x49, - 0x4e, 0x48, 0x45, 0x52, 0x49, 0x54, 0x5f, 0x49, 0x44, 0x45, 0x4e, 0x54, 0x49, 0x54, 0x59, 0x10, - 0x3d, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x42, 0x47, 0x5f, 0x53, 0x4b, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, - 0x55, 0x50, 0x34, 0x10, 0x3e, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x42, 0x47, 0x5f, 0x53, 0x4b, 0x5f, - 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x36, 0x10, 0x3f, 0x12, 0x11, 0x0a, 0x0d, 0x44, 0x42, 0x47, - 0x5f, 0x53, 0x4b, 0x5f, 0x41, 0x53, 0x53, 0x49, 0x47, 0x4e, 0x10, 0x40, 0x42, 0x26, 0x5a, 0x24, - 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x69, 0x6c, 0x69, 0x75, - 0x6d, 0x2f, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, - 0x66, 0x6c, 0x6f, 0x77, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x53, 0x4c, 0x4f, 0x54, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, + 0x53, 0x53, 0x10, 0x21, 0x12, 0x27, 0x0a, 0x23, 0x44, 0x42, 0x47, 0x5f, 
0x4c, 0x42, 0x34, 0x5f, + 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x53, + 0x4c, 0x4f, 0x54, 0x5f, 0x56, 0x32, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x10, 0x22, 0x12, 0x1f, 0x0a, + 0x1b, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x5f, + 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x10, 0x23, 0x12, 0x1e, + 0x0a, 0x1a, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x52, 0x45, 0x56, 0x45, 0x52, 0x53, + 0x45, 0x5f, 0x4e, 0x41, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x10, 0x24, 0x12, 0x17, + 0x0a, 0x13, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x52, 0x45, 0x56, 0x45, 0x52, 0x53, + 0x45, 0x5f, 0x4e, 0x41, 0x54, 0x10, 0x25, 0x12, 0x19, 0x0a, 0x15, 0x44, 0x42, 0x47, 0x5f, 0x4c, + 0x42, 0x34, 0x5f, 0x4c, 0x4f, 0x4f, 0x50, 0x42, 0x41, 0x43, 0x4b, 0x5f, 0x53, 0x4e, 0x41, 0x54, + 0x10, 0x26, 0x12, 0x1d, 0x0a, 0x19, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x34, 0x5f, 0x4c, 0x4f, + 0x4f, 0x50, 0x42, 0x41, 0x43, 0x4b, 0x5f, 0x53, 0x4e, 0x41, 0x54, 0x5f, 0x52, 0x45, 0x56, 0x10, + 0x27, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, + 0x55, 0x50, 0x34, 0x10, 0x28, 0x12, 0x1b, 0x0a, 0x17, 0x44, 0x42, 0x47, 0x5f, 0x52, 0x52, 0x5f, + 0x42, 0x41, 0x43, 0x4b, 0x45, 0x4e, 0x44, 0x5f, 0x53, 0x4c, 0x4f, 0x54, 0x5f, 0x53, 0x45, 0x4c, + 0x10, 0x29, 0x12, 0x18, 0x0a, 0x14, 0x44, 0x42, 0x47, 0x5f, 0x52, 0x45, 0x56, 0x5f, 0x50, 0x52, + 0x4f, 0x58, 0x59, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x10, 0x2a, 0x12, 0x17, 0x0a, 0x13, + 0x44, 0x42, 0x47, 0x5f, 0x52, 0x45, 0x56, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x46, 0x4f, + 0x55, 0x4e, 0x44, 0x10, 0x2b, 0x12, 0x18, 0x0a, 0x14, 0x44, 0x42, 0x47, 0x5f, 0x52, 0x45, 0x56, + 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x10, 0x2c, 0x12, + 0x11, 0x0a, 0x0d, 0x44, 0x42, 0x47, 0x5f, 0x4c, 0x34, 0x5f, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, + 0x10, 0x2d, 0x12, 0x19, 0x0a, 0x15, 0x44, 0x42, 0x47, 0x5f, 0x4e, 0x45, 0x54, 0x44, 0x45, 0x56, + 0x5f, 0x49, 0x4e, 0x5f, 0x43, 0x4c, 0x55, 0x53, 0x54, 0x45, 0x52, 0x10, 0x2e, 0x12, 0x15, 0x0a, + 0x11, 0x44, 0x42, 0x47, 0x5f, 0x4e, 0x45, 0x54, 0x44, 0x45, 0x56, 0x5f, 0x45, 0x4e, 0x43, 0x41, + 0x50, 0x34, 0x10, 0x2f, 0x12, 0x14, 0x0a, 0x10, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4c, + 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x34, 0x5f, 0x31, 0x10, 0x30, 0x12, 0x14, 0x0a, 0x10, 0x44, 0x42, + 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x34, 0x5f, 0x32, 0x10, 0x31, + 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, + 0x45, 0x44, 0x34, 0x10, 0x32, 0x12, 0x14, 0x0a, 0x10, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, + 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x36, 0x5f, 0x31, 0x10, 0x33, 0x12, 0x14, 0x0a, 0x10, 0x44, + 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x36, 0x5f, 0x32, 0x10, + 0x34, 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x42, 0x47, 0x5f, 0x43, 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41, + 0x54, 0x45, 0x44, 0x36, 0x10, 0x35, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x42, 0x47, 0x5f, 0x53, 0x4b, + 0x49, 0x50, 0x5f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x10, 0x36, 0x12, 0x11, 0x0a, 0x0d, 0x44, 0x42, + 0x47, 0x5f, 0x4c, 0x34, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x10, 0x37, 0x12, 0x19, 0x0a, + 0x15, 0x44, 0x42, 0x47, 0x5f, 0x49, 0x50, 0x5f, 0x49, 0x44, 0x5f, 0x4d, 0x41, 0x50, 0x5f, 0x46, + 0x41, 0x49, 0x4c, 0x45, 0x44, 0x34, 0x10, 0x38, 0x12, 0x19, 0x0a, 0x15, 0x44, 0x42, 0x47, 0x5f, + 
0x49, 0x50, 0x5f, 0x49, 0x44, 0x5f, 0x4d, 0x41, 0x50, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, + 0x36, 0x10, 0x39, 0x12, 0x1a, 0x0a, 0x16, 0x44, 0x42, 0x47, 0x5f, 0x49, 0x50, 0x5f, 0x49, 0x44, + 0x5f, 0x4d, 0x41, 0x50, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, 0x45, 0x44, 0x34, 0x10, 0x3a, 0x12, + 0x1a, 0x0a, 0x16, 0x44, 0x42, 0x47, 0x5f, 0x49, 0x50, 0x5f, 0x49, 0x44, 0x5f, 0x4d, 0x41, 0x50, + 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, 0x45, 0x44, 0x36, 0x10, 0x3b, 0x12, 0x13, 0x0a, 0x0f, 0x44, + 0x42, 0x47, 0x5f, 0x4c, 0x42, 0x5f, 0x53, 0x54, 0x41, 0x4c, 0x45, 0x5f, 0x43, 0x54, 0x10, 0x3c, + 0x12, 0x18, 0x0a, 0x14, 0x44, 0x42, 0x47, 0x5f, 0x49, 0x4e, 0x48, 0x45, 0x52, 0x49, 0x54, 0x5f, + 0x49, 0x44, 0x45, 0x4e, 0x54, 0x49, 0x54, 0x59, 0x10, 0x3d, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x42, + 0x47, 0x5f, 0x53, 0x4b, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x34, 0x10, 0x3e, 0x12, 0x12, + 0x0a, 0x0e, 0x44, 0x42, 0x47, 0x5f, 0x53, 0x4b, 0x5f, 0x4c, 0x4f, 0x4f, 0x4b, 0x55, 0x50, 0x36, + 0x10, 0x3f, 0x12, 0x11, 0x0a, 0x0d, 0x44, 0x42, 0x47, 0x5f, 0x53, 0x4b, 0x5f, 0x41, 0x53, 0x53, + 0x49, 0x47, 0x4e, 0x10, 0x40, 0x42, 0x26, 0x5a, 0x24, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x63, 0x69, 0x6c, 0x69, 0x75, + 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x66, 0x6c, 0x6f, 0x77, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -5232,132 +5309,134 @@ func file_flow_flow_proto_rawDescGZIP() []byte { return file_flow_flow_proto_rawDescData } -var file_flow_flow_proto_enumTypes = make([]protoimpl.EnumInfo, 13) +var file_flow_flow_proto_enumTypes = make([]protoimpl.EnumInfo, 14) var file_flow_flow_proto_msgTypes = make([]protoimpl.MessageInfo, 36) var file_flow_flow_proto_goTypes = []interface{}{ (FlowType)(0), // 0: flow.FlowType - (TraceObservationPoint)(0), // 1: flow.TraceObservationPoint - (L7FlowType)(0), // 2: flow.L7FlowType - (IPVersion)(0), // 3: flow.IPVersion - (Verdict)(0), // 4: flow.Verdict - (DropReason)(0), // 5: flow.DropReason - (TrafficDirection)(0), // 6: flow.TrafficDirection - (DebugCapturePoint)(0), // 7: flow.DebugCapturePoint - (EventType)(0), // 8: flow.EventType - (LostEventSource)(0), // 9: flow.LostEventSource - (AgentEventType)(0), // 10: flow.AgentEventType - (SocketTranslationPoint)(0), // 11: flow.SocketTranslationPoint - (DebugEventType)(0), // 12: flow.DebugEventType - (*Flow)(nil), // 13: flow.Flow - (*Layer4)(nil), // 14: flow.Layer4 - (*Layer7)(nil), // 15: flow.Layer7 - (*TraceContext)(nil), // 16: flow.TraceContext - (*TraceParent)(nil), // 17: flow.TraceParent - (*Endpoint)(nil), // 18: flow.Endpoint - (*Workload)(nil), // 19: flow.Workload - (*TCP)(nil), // 20: flow.TCP - (*IP)(nil), // 21: flow.IP - (*Ethernet)(nil), // 22: flow.Ethernet - (*TCPFlags)(nil), // 23: flow.TCPFlags - (*UDP)(nil), // 24: flow.UDP - (*SCTP)(nil), // 25: flow.SCTP - (*ICMPv4)(nil), // 26: flow.ICMPv4 - (*ICMPv6)(nil), // 27: flow.ICMPv6 - (*EventTypeFilter)(nil), // 28: flow.EventTypeFilter - (*CiliumEventType)(nil), // 29: flow.CiliumEventType - (*FlowFilter)(nil), // 30: flow.FlowFilter - (*DNS)(nil), // 31: flow.DNS - (*HTTPHeader)(nil), // 32: flow.HTTPHeader - (*HTTP)(nil), // 33: flow.HTTP - (*Kafka)(nil), // 34: flow.Kafka - (*Service)(nil), // 35: flow.Service - (*LostEvent)(nil), // 36: flow.LostEvent - (*AgentEvent)(nil), // 37: flow.AgentEvent - (*AgentEventUnknown)(nil), // 38: flow.AgentEventUnknown - (*TimeNotification)(nil), // 39: flow.TimeNotification - (*PolicyUpdateNotification)(nil), // 40: 
flow.PolicyUpdateNotification - (*EndpointRegenNotification)(nil), // 41: flow.EndpointRegenNotification - (*EndpointUpdateNotification)(nil), // 42: flow.EndpointUpdateNotification - (*IPCacheNotification)(nil), // 43: flow.IPCacheNotification - (*ServiceUpsertNotificationAddr)(nil), // 44: flow.ServiceUpsertNotificationAddr - (*ServiceUpsertNotification)(nil), // 45: flow.ServiceUpsertNotification - (*ServiceDeleteNotification)(nil), // 46: flow.ServiceDeleteNotification - (*NetworkInterface)(nil), // 47: flow.NetworkInterface - (*DebugEvent)(nil), // 48: flow.DebugEvent - (*timestamppb.Timestamp)(nil), // 49: google.protobuf.Timestamp - (*wrapperspb.BoolValue)(nil), // 50: google.protobuf.BoolValue - (*wrapperspb.Int32Value)(nil), // 51: google.protobuf.Int32Value - (*wrapperspb.UInt32Value)(nil), // 52: google.protobuf.UInt32Value + (AuthType)(0), // 1: flow.AuthType + (TraceObservationPoint)(0), // 2: flow.TraceObservationPoint + (L7FlowType)(0), // 3: flow.L7FlowType + (IPVersion)(0), // 4: flow.IPVersion + (Verdict)(0), // 5: flow.Verdict + (DropReason)(0), // 6: flow.DropReason + (TrafficDirection)(0), // 7: flow.TrafficDirection + (DebugCapturePoint)(0), // 8: flow.DebugCapturePoint + (EventType)(0), // 9: flow.EventType + (LostEventSource)(0), // 10: flow.LostEventSource + (AgentEventType)(0), // 11: flow.AgentEventType + (SocketTranslationPoint)(0), // 12: flow.SocketTranslationPoint + (DebugEventType)(0), // 13: flow.DebugEventType + (*Flow)(nil), // 14: flow.Flow + (*Layer4)(nil), // 15: flow.Layer4 + (*Layer7)(nil), // 16: flow.Layer7 + (*TraceContext)(nil), // 17: flow.TraceContext + (*TraceParent)(nil), // 18: flow.TraceParent + (*Endpoint)(nil), // 19: flow.Endpoint + (*Workload)(nil), // 20: flow.Workload + (*TCP)(nil), // 21: flow.TCP + (*IP)(nil), // 22: flow.IP + (*Ethernet)(nil), // 23: flow.Ethernet + (*TCPFlags)(nil), // 24: flow.TCPFlags + (*UDP)(nil), // 25: flow.UDP + (*SCTP)(nil), // 26: flow.SCTP + (*ICMPv4)(nil), // 27: flow.ICMPv4 + (*ICMPv6)(nil), // 28: flow.ICMPv6 + (*EventTypeFilter)(nil), // 29: flow.EventTypeFilter + (*CiliumEventType)(nil), // 30: flow.CiliumEventType + (*FlowFilter)(nil), // 31: flow.FlowFilter + (*DNS)(nil), // 32: flow.DNS + (*HTTPHeader)(nil), // 33: flow.HTTPHeader + (*HTTP)(nil), // 34: flow.HTTP + (*Kafka)(nil), // 35: flow.Kafka + (*Service)(nil), // 36: flow.Service + (*LostEvent)(nil), // 37: flow.LostEvent + (*AgentEvent)(nil), // 38: flow.AgentEvent + (*AgentEventUnknown)(nil), // 39: flow.AgentEventUnknown + (*TimeNotification)(nil), // 40: flow.TimeNotification + (*PolicyUpdateNotification)(nil), // 41: flow.PolicyUpdateNotification + (*EndpointRegenNotification)(nil), // 42: flow.EndpointRegenNotification + (*EndpointUpdateNotification)(nil), // 43: flow.EndpointUpdateNotification + (*IPCacheNotification)(nil), // 44: flow.IPCacheNotification + (*ServiceUpsertNotificationAddr)(nil), // 45: flow.ServiceUpsertNotificationAddr + (*ServiceUpsertNotification)(nil), // 46: flow.ServiceUpsertNotification + (*ServiceDeleteNotification)(nil), // 47: flow.ServiceDeleteNotification + (*NetworkInterface)(nil), // 48: flow.NetworkInterface + (*DebugEvent)(nil), // 49: flow.DebugEvent + (*timestamppb.Timestamp)(nil), // 50: google.protobuf.Timestamp + (*wrapperspb.BoolValue)(nil), // 51: google.protobuf.BoolValue + (*wrapperspb.Int32Value)(nil), // 52: google.protobuf.Int32Value + (*wrapperspb.UInt32Value)(nil), // 53: google.protobuf.UInt32Value } var file_flow_flow_proto_depIdxs = []int32{ - 49, // 0: flow.Flow.time:type_name -> 
google.protobuf.Timestamp - 4, // 1: flow.Flow.verdict:type_name -> flow.Verdict - 22, // 2: flow.Flow.ethernet:type_name -> flow.Ethernet - 21, // 3: flow.Flow.IP:type_name -> flow.IP - 14, // 4: flow.Flow.l4:type_name -> flow.Layer4 - 18, // 5: flow.Flow.source:type_name -> flow.Endpoint - 18, // 6: flow.Flow.destination:type_name -> flow.Endpoint - 0, // 7: flow.Flow.Type:type_name -> flow.FlowType - 15, // 8: flow.Flow.l7:type_name -> flow.Layer7 - 29, // 9: flow.Flow.event_type:type_name -> flow.CiliumEventType - 35, // 10: flow.Flow.source_service:type_name -> flow.Service - 35, // 11: flow.Flow.destination_service:type_name -> flow.Service - 6, // 12: flow.Flow.traffic_direction:type_name -> flow.TrafficDirection - 1, // 13: flow.Flow.trace_observation_point:type_name -> flow.TraceObservationPoint - 5, // 14: flow.Flow.drop_reason_desc:type_name -> flow.DropReason - 50, // 15: flow.Flow.is_reply:type_name -> google.protobuf.BoolValue - 7, // 16: flow.Flow.debug_capture_point:type_name -> flow.DebugCapturePoint - 47, // 17: flow.Flow.interface:type_name -> flow.NetworkInterface - 16, // 18: flow.Flow.trace_context:type_name -> flow.TraceContext - 11, // 19: flow.Flow.sock_xlate_point:type_name -> flow.SocketTranslationPoint - 20, // 20: flow.Layer4.TCP:type_name -> flow.TCP - 24, // 21: flow.Layer4.UDP:type_name -> flow.UDP - 26, // 22: flow.Layer4.ICMPv4:type_name -> flow.ICMPv4 - 27, // 23: flow.Layer4.ICMPv6:type_name -> flow.ICMPv6 - 25, // 24: flow.Layer4.SCTP:type_name -> flow.SCTP - 2, // 25: flow.Layer7.type:type_name -> flow.L7FlowType - 31, // 26: flow.Layer7.dns:type_name -> flow.DNS - 33, // 27: flow.Layer7.http:type_name -> flow.HTTP - 34, // 28: flow.Layer7.kafka:type_name -> flow.Kafka - 17, // 29: flow.TraceContext.parent:type_name -> flow.TraceParent - 19, // 30: flow.Endpoint.workloads:type_name -> flow.Workload - 23, // 31: flow.TCP.flags:type_name -> flow.TCPFlags - 3, // 32: flow.IP.ipVersion:type_name -> flow.IPVersion - 19, // 33: flow.FlowFilter.source_workload:type_name -> flow.Workload - 19, // 34: flow.FlowFilter.destination_workload:type_name -> flow.Workload - 6, // 35: flow.FlowFilter.traffic_direction:type_name -> flow.TrafficDirection - 4, // 36: flow.FlowFilter.verdict:type_name -> flow.Verdict - 28, // 37: flow.FlowFilter.event_type:type_name -> flow.EventTypeFilter - 23, // 38: flow.FlowFilter.tcp_flags:type_name -> flow.TCPFlags - 3, // 39: flow.FlowFilter.ip_version:type_name -> flow.IPVersion - 32, // 40: flow.HTTP.headers:type_name -> flow.HTTPHeader - 9, // 41: flow.LostEvent.source:type_name -> flow.LostEventSource - 51, // 42: flow.LostEvent.cpu:type_name -> google.protobuf.Int32Value - 10, // 43: flow.AgentEvent.type:type_name -> flow.AgentEventType - 38, // 44: flow.AgentEvent.unknown:type_name -> flow.AgentEventUnknown - 39, // 45: flow.AgentEvent.agent_start:type_name -> flow.TimeNotification - 40, // 46: flow.AgentEvent.policy_update:type_name -> flow.PolicyUpdateNotification - 41, // 47: flow.AgentEvent.endpoint_regenerate:type_name -> flow.EndpointRegenNotification - 42, // 48: flow.AgentEvent.endpoint_update:type_name -> flow.EndpointUpdateNotification - 43, // 49: flow.AgentEvent.ipcache_update:type_name -> flow.IPCacheNotification - 45, // 50: flow.AgentEvent.service_upsert:type_name -> flow.ServiceUpsertNotification - 46, // 51: flow.AgentEvent.service_delete:type_name -> flow.ServiceDeleteNotification - 49, // 52: flow.TimeNotification.time:type_name -> google.protobuf.Timestamp - 52, // 53: 
flow.IPCacheNotification.old_identity:type_name -> google.protobuf.UInt32Value - 44, // 54: flow.ServiceUpsertNotification.frontend_address:type_name -> flow.ServiceUpsertNotificationAddr - 44, // 55: flow.ServiceUpsertNotification.backend_addresses:type_name -> flow.ServiceUpsertNotificationAddr - 12, // 56: flow.DebugEvent.type:type_name -> flow.DebugEventType - 18, // 57: flow.DebugEvent.source:type_name -> flow.Endpoint - 52, // 58: flow.DebugEvent.hash:type_name -> google.protobuf.UInt32Value - 52, // 59: flow.DebugEvent.arg1:type_name -> google.protobuf.UInt32Value - 52, // 60: flow.DebugEvent.arg2:type_name -> google.protobuf.UInt32Value - 52, // 61: flow.DebugEvent.arg3:type_name -> google.protobuf.UInt32Value - 51, // 62: flow.DebugEvent.cpu:type_name -> google.protobuf.Int32Value - 63, // [63:63] is the sub-list for method output_type - 63, // [63:63] is the sub-list for method input_type - 63, // [63:63] is the sub-list for extension type_name - 63, // [63:63] is the sub-list for extension extendee - 0, // [0:63] is the sub-list for field type_name + 50, // 0: flow.Flow.time:type_name -> google.protobuf.Timestamp + 5, // 1: flow.Flow.verdict:type_name -> flow.Verdict + 1, // 2: flow.Flow.auth_type:type_name -> flow.AuthType + 23, // 3: flow.Flow.ethernet:type_name -> flow.Ethernet + 22, // 4: flow.Flow.IP:type_name -> flow.IP + 15, // 5: flow.Flow.l4:type_name -> flow.Layer4 + 19, // 6: flow.Flow.source:type_name -> flow.Endpoint + 19, // 7: flow.Flow.destination:type_name -> flow.Endpoint + 0, // 8: flow.Flow.Type:type_name -> flow.FlowType + 16, // 9: flow.Flow.l7:type_name -> flow.Layer7 + 30, // 10: flow.Flow.event_type:type_name -> flow.CiliumEventType + 36, // 11: flow.Flow.source_service:type_name -> flow.Service + 36, // 12: flow.Flow.destination_service:type_name -> flow.Service + 7, // 13: flow.Flow.traffic_direction:type_name -> flow.TrafficDirection + 2, // 14: flow.Flow.trace_observation_point:type_name -> flow.TraceObservationPoint + 6, // 15: flow.Flow.drop_reason_desc:type_name -> flow.DropReason + 51, // 16: flow.Flow.is_reply:type_name -> google.protobuf.BoolValue + 8, // 17: flow.Flow.debug_capture_point:type_name -> flow.DebugCapturePoint + 48, // 18: flow.Flow.interface:type_name -> flow.NetworkInterface + 17, // 19: flow.Flow.trace_context:type_name -> flow.TraceContext + 12, // 20: flow.Flow.sock_xlate_point:type_name -> flow.SocketTranslationPoint + 21, // 21: flow.Layer4.TCP:type_name -> flow.TCP + 25, // 22: flow.Layer4.UDP:type_name -> flow.UDP + 27, // 23: flow.Layer4.ICMPv4:type_name -> flow.ICMPv4 + 28, // 24: flow.Layer4.ICMPv6:type_name -> flow.ICMPv6 + 26, // 25: flow.Layer4.SCTP:type_name -> flow.SCTP + 3, // 26: flow.Layer7.type:type_name -> flow.L7FlowType + 32, // 27: flow.Layer7.dns:type_name -> flow.DNS + 34, // 28: flow.Layer7.http:type_name -> flow.HTTP + 35, // 29: flow.Layer7.kafka:type_name -> flow.Kafka + 18, // 30: flow.TraceContext.parent:type_name -> flow.TraceParent + 20, // 31: flow.Endpoint.workloads:type_name -> flow.Workload + 24, // 32: flow.TCP.flags:type_name -> flow.TCPFlags + 4, // 33: flow.IP.ipVersion:type_name -> flow.IPVersion + 20, // 34: flow.FlowFilter.source_workload:type_name -> flow.Workload + 20, // 35: flow.FlowFilter.destination_workload:type_name -> flow.Workload + 7, // 36: flow.FlowFilter.traffic_direction:type_name -> flow.TrafficDirection + 5, // 37: flow.FlowFilter.verdict:type_name -> flow.Verdict + 29, // 38: flow.FlowFilter.event_type:type_name -> flow.EventTypeFilter + 24, // 39: 
flow.FlowFilter.tcp_flags:type_name -> flow.TCPFlags + 4, // 40: flow.FlowFilter.ip_version:type_name -> flow.IPVersion + 33, // 41: flow.HTTP.headers:type_name -> flow.HTTPHeader + 10, // 42: flow.LostEvent.source:type_name -> flow.LostEventSource + 52, // 43: flow.LostEvent.cpu:type_name -> google.protobuf.Int32Value + 11, // 44: flow.AgentEvent.type:type_name -> flow.AgentEventType + 39, // 45: flow.AgentEvent.unknown:type_name -> flow.AgentEventUnknown + 40, // 46: flow.AgentEvent.agent_start:type_name -> flow.TimeNotification + 41, // 47: flow.AgentEvent.policy_update:type_name -> flow.PolicyUpdateNotification + 42, // 48: flow.AgentEvent.endpoint_regenerate:type_name -> flow.EndpointRegenNotification + 43, // 49: flow.AgentEvent.endpoint_update:type_name -> flow.EndpointUpdateNotification + 44, // 50: flow.AgentEvent.ipcache_update:type_name -> flow.IPCacheNotification + 46, // 51: flow.AgentEvent.service_upsert:type_name -> flow.ServiceUpsertNotification + 47, // 52: flow.AgentEvent.service_delete:type_name -> flow.ServiceDeleteNotification + 50, // 53: flow.TimeNotification.time:type_name -> google.protobuf.Timestamp + 53, // 54: flow.IPCacheNotification.old_identity:type_name -> google.protobuf.UInt32Value + 45, // 55: flow.ServiceUpsertNotification.frontend_address:type_name -> flow.ServiceUpsertNotificationAddr + 45, // 56: flow.ServiceUpsertNotification.backend_addresses:type_name -> flow.ServiceUpsertNotificationAddr + 13, // 57: flow.DebugEvent.type:type_name -> flow.DebugEventType + 19, // 58: flow.DebugEvent.source:type_name -> flow.Endpoint + 53, // 59: flow.DebugEvent.hash:type_name -> google.protobuf.UInt32Value + 53, // 60: flow.DebugEvent.arg1:type_name -> google.protobuf.UInt32Value + 53, // 61: flow.DebugEvent.arg2:type_name -> google.protobuf.UInt32Value + 53, // 62: flow.DebugEvent.arg3:type_name -> google.protobuf.UInt32Value + 52, // 63: flow.DebugEvent.cpu:type_name -> google.protobuf.Int32Value + 64, // [64:64] is the sub-list for method output_type + 64, // [64:64] is the sub-list for method input_type + 64, // [64:64] is the sub-list for extension type_name + 64, // [64:64] is the sub-list for extension extendee + 0, // [0:64] is the sub-list for field type_name } func init() { file_flow_flow_proto_init() } @@ -5826,7 +5905,7 @@ func file_flow_flow_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_flow_flow_proto_rawDesc, - NumEnums: 13, + NumEnums: 14, NumMessages: 36, NumExtensions: 0, NumServices: 0, diff --git a/vendor/github.com/cilium/cilium/api/v1/flow/flow.proto b/vendor/github.com/cilium/cilium/api/v1/flow/flow.proto index 8525f93a4..11143ea77 100644 --- a/vendor/github.com/cilium/cilium/api/v1/flow/flow.proto +++ b/vendor/github.com/cilium/cilium/api/v1/flow/flow.proto @@ -21,6 +21,10 @@ message Flow { // deprecated in favor of drop_reason_desc. uint32 drop_reason = 3 [deprecated=true]; + // auth_type is the authentication type specified for the flow in Cilium Network Policy. + // Only set on policy verdict events. + AuthType auth_type = 35; + // l2 Ethernet ethernet = 4; // l3 @@ -119,6 +123,13 @@ enum FlowType { SOCK = 3; } +// These types correspond to definitions in pkg/policy/l4.go +enum AuthType { + DISABLED = 0; + SPIRE = 1; + TEST_ALWAYS_FAIL = 2; +} + enum TraceObservationPoint { // Cilium treats 0 as TO_LXC, but its's something we should work to remove. 
// This is intentionally set as unknown, so proto API can guarantee the @@ -378,6 +389,8 @@ enum DropReason { CT_NO_MAP_FOUND = 190; SNAT_NO_MAP_FOUND = 191; INVALID_CLUSTER_ID = 192; + UNSUPPORTED_PROTOCOL_FOR_DSR_ENCAP = 193; + NO_EGRESS_GATEWAY = 194; } enum TrafficDirection { diff --git a/vendor/github.com/cilium/cilium/api/v1/health/client/cilium_health_api_client.go b/vendor/github.com/cilium/cilium/api/v1/health/client/cilium_health_api_client.go deleted file mode 100644 index b6887e63d..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/health/client/cilium_health_api_client.go +++ /dev/null @@ -1,120 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package client - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "github.com/go-openapi/runtime" - httptransport "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/health/client/connectivity" - "github.com/cilium/cilium/api/v1/health/client/restapi" -) - -// Default cilium health API HTTP client. -var Default = NewHTTPClient(nil) - -const ( - // DefaultHost is the default Host - // found in Meta (info) section of spec file - DefaultHost string = "localhost" - // DefaultBasePath is the default BasePath - // found in Meta (info) section of spec file - DefaultBasePath string = "/v1beta" -) - -// DefaultSchemes are the default schemes found in Meta (info) section of spec file -var DefaultSchemes = []string{"http"} - -// NewHTTPClient creates a new cilium health API HTTP client. -func NewHTTPClient(formats strfmt.Registry) *CiliumHealthAPI { - return NewHTTPClientWithConfig(formats, nil) -} - -// NewHTTPClientWithConfig creates a new cilium health API HTTP client, -// using a customizable transport config. -func NewHTTPClientWithConfig(formats strfmt.Registry, cfg *TransportConfig) *CiliumHealthAPI { - // ensure nullable parameters have default - if cfg == nil { - cfg = DefaultTransportConfig() - } - - // create transport and client - transport := httptransport.New(cfg.Host, cfg.BasePath, cfg.Schemes) - return New(transport, formats) -} - -// New creates a new cilium health API client -func New(transport runtime.ClientTransport, formats strfmt.Registry) *CiliumHealthAPI { - // ensure nullable parameters have default - if formats == nil { - formats = strfmt.Default - } - - cli := new(CiliumHealthAPI) - cli.Transport = transport - cli.Connectivity = connectivity.New(transport, formats) - cli.Restapi = restapi.New(transport, formats) - return cli -} - -// DefaultTransportConfig creates a TransportConfig with the -// default settings taken from the meta section of the spec file. -func DefaultTransportConfig() *TransportConfig { - return &TransportConfig{ - Host: DefaultHost, - BasePath: DefaultBasePath, - Schemes: DefaultSchemes, - } -} - -// TransportConfig contains the transport related info, -// found in the meta section of the spec file. -type TransportConfig struct { - Host string - BasePath string - Schemes []string -} - -// WithHost overrides the default host, -// provided by the meta section of the spec file. -func (cfg *TransportConfig) WithHost(host string) *TransportConfig { - cfg.Host = host - return cfg -} - -// WithBasePath overrides the default basePath, -// provided by the meta section of the spec file. 
-func (cfg *TransportConfig) WithBasePath(basePath string) *TransportConfig { - cfg.BasePath = basePath - return cfg -} - -// WithSchemes overrides the default schemes, -// provided by the meta section of the spec file. -func (cfg *TransportConfig) WithSchemes(schemes []string) *TransportConfig { - cfg.Schemes = schemes - return cfg -} - -// CiliumHealthAPI is a client for cilium health API -type CiliumHealthAPI struct { - Connectivity connectivity.ClientService - - Restapi restapi.ClientService - - Transport runtime.ClientTransport -} - -// SetTransport changes the transport on the client and all its subresources -func (c *CiliumHealthAPI) SetTransport(transport runtime.ClientTransport) { - c.Transport = transport - c.Connectivity.SetTransport(transport) - c.Restapi.SetTransport(transport) -} diff --git a/vendor/github.com/cilium/cilium/api/v1/health/client/connectivity/connectivity_client.go b/vendor/github.com/cilium/cilium/api/v1/health/client/connectivity/connectivity_client.go deleted file mode 100644 index b8918f4b2..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/health/client/connectivity/connectivity_client.go +++ /dev/null @@ -1,130 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package connectivity - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" -) - -// New creates a new connectivity API client. -func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService { - return &Client{transport: transport, formats: formats} -} - -/* -Client for connectivity API -*/ -type Client struct { - transport runtime.ClientTransport - formats strfmt.Registry -} - -// ClientOption is the option for Client methods -type ClientOption func(*runtime.ClientOperation) - -// ClientService is the interface for Client methods -type ClientService interface { - GetStatus(params *GetStatusParams, opts ...ClientOption) (*GetStatusOK, error) - - PutStatusProbe(params *PutStatusProbeParams, opts ...ClientOption) (*PutStatusProbeOK, error) - - SetTransport(transport runtime.ClientTransport) -} - -/* - GetStatus gets connectivity status of the cilium cluster - - Returns the connectivity status to all other cilium-health instances - -using interval-based probing. -*/ -func (a *Client) GetStatus(params *GetStatusParams, opts ...ClientOption) (*GetStatusOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewGetStatusParams() - } - op := &runtime.ClientOperation{ - ID: "GetStatus", - Method: "GET", - PathPattern: "/status", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &GetStatusReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*GetStatusOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for GetStatus: API contract not enforced by server. 
Client expected to get an error, but got: %T", result) - panic(msg) -} - -/* - PutStatusProbe runs synchronous connectivity probe to determine status of the cilium cluster - - Runs a synchronous probe to all other cilium-health instances and - -returns the connectivity status. -*/ -func (a *Client) PutStatusProbe(params *PutStatusProbeParams, opts ...ClientOption) (*PutStatusProbeOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewPutStatusProbeParams() - } - op := &runtime.ClientOperation{ - ID: "PutStatusProbe", - Method: "PUT", - PathPattern: "/status/probe", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &PutStatusProbeReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*PutStatusProbeOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for PutStatusProbe: API contract not enforced by server. Client expected to get an error, but got: %T", result) - panic(msg) -} - -// SetTransport changes the transport on the client -func (a *Client) SetTransport(transport runtime.ClientTransport) { - a.transport = transport -} diff --git a/vendor/github.com/cilium/cilium/api/v1/health/client/connectivity/get_status_parameters.go b/vendor/github.com/cilium/cilium/api/v1/health/client/connectivity/get_status_parameters.go deleted file mode 100644 index bf0cf9138..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/health/client/connectivity/get_status_parameters.go +++ /dev/null @@ -1,131 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package connectivity - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" -) - -// NewGetStatusParams creates a new GetStatusParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewGetStatusParams() *GetStatusParams { - return &GetStatusParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewGetStatusParamsWithTimeout creates a new GetStatusParams object -// with the ability to set a timeout on a request. -func NewGetStatusParamsWithTimeout(timeout time.Duration) *GetStatusParams { - return &GetStatusParams{ - timeout: timeout, - } -} - -// NewGetStatusParamsWithContext creates a new GetStatusParams object -// with the ability to set a context for a request. -func NewGetStatusParamsWithContext(ctx context.Context) *GetStatusParams { - return &GetStatusParams{ - Context: ctx, - } -} - -// NewGetStatusParamsWithHTTPClient creates a new GetStatusParams object -// with the ability to set a custom HTTPClient for a request. 
-func NewGetStatusParamsWithHTTPClient(client *http.Client) *GetStatusParams { - return &GetStatusParams{ - HTTPClient: client, - } -} - -/* -GetStatusParams contains all the parameters to send to the API endpoint - - for the get status operation. - - Typically these are written to a http.Request. -*/ -type GetStatusParams struct { - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the get status params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetStatusParams) WithDefaults() *GetStatusParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the get status params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetStatusParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the get status params -func (o *GetStatusParams) WithTimeout(timeout time.Duration) *GetStatusParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the get status params -func (o *GetStatusParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the get status params -func (o *GetStatusParams) WithContext(ctx context.Context) *GetStatusParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the get status params -func (o *GetStatusParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the get status params -func (o *GetStatusParams) WithHTTPClient(client *http.Client) *GetStatusParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the get status params -func (o *GetStatusParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WriteToRequest writes these params to a swagger request -func (o *GetStatusParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/health/client/connectivity/get_status_responses.go b/vendor/github.com/cilium/cilium/api/v1/health/client/connectivity/get_status_responses.go deleted file mode 100644 index cf977a0df..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/health/client/connectivity/get_status_responses.go +++ /dev/null @@ -1,101 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package connectivity - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/health/models" -) - -// GetStatusReader is a Reader for the GetStatus structure. -type GetStatusReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. 
-func (o *GetStatusReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewGetStatusOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewGetStatusOK creates a GetStatusOK with default headers values -func NewGetStatusOK() *GetStatusOK { - return &GetStatusOK{} -} - -/* -GetStatusOK describes a response with status code 200, with default header values. - -Success -*/ -type GetStatusOK struct { - Payload *models.HealthStatusResponse -} - -// IsSuccess returns true when this get status o k response has a 2xx status code -func (o *GetStatusOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this get status o k response has a 3xx status code -func (o *GetStatusOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get status o k response has a 4xx status code -func (o *GetStatusOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this get status o k response has a 5xx status code -func (o *GetStatusOK) IsServerError() bool { - return false -} - -// IsCode returns true when this get status o k response a status code equal to that given -func (o *GetStatusOK) IsCode(code int) bool { - return code == 200 -} - -func (o *GetStatusOK) Error() string { - return fmt.Sprintf("[GET /status][%d] getStatusOK %+v", 200, o.Payload) -} - -func (o *GetStatusOK) String() string { - return fmt.Sprintf("[GET /status][%d] getStatusOK %+v", 200, o.Payload) -} - -func (o *GetStatusOK) GetPayload() *models.HealthStatusResponse { - return o.Payload -} - -func (o *GetStatusOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - o.Payload = new(models.HealthStatusResponse) - - // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/health/client/connectivity/put_status_probe_parameters.go b/vendor/github.com/cilium/cilium/api/v1/health/client/connectivity/put_status_probe_parameters.go deleted file mode 100644 index 8708a9a2a..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/health/client/connectivity/put_status_probe_parameters.go +++ /dev/null @@ -1,131 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package connectivity - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" -) - -// NewPutStatusProbeParams creates a new PutStatusProbeParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. 
-func NewPutStatusProbeParams() *PutStatusProbeParams { - return &PutStatusProbeParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewPutStatusProbeParamsWithTimeout creates a new PutStatusProbeParams object -// with the ability to set a timeout on a request. -func NewPutStatusProbeParamsWithTimeout(timeout time.Duration) *PutStatusProbeParams { - return &PutStatusProbeParams{ - timeout: timeout, - } -} - -// NewPutStatusProbeParamsWithContext creates a new PutStatusProbeParams object -// with the ability to set a context for a request. -func NewPutStatusProbeParamsWithContext(ctx context.Context) *PutStatusProbeParams { - return &PutStatusProbeParams{ - Context: ctx, - } -} - -// NewPutStatusProbeParamsWithHTTPClient creates a new PutStatusProbeParams object -// with the ability to set a custom HTTPClient for a request. -func NewPutStatusProbeParamsWithHTTPClient(client *http.Client) *PutStatusProbeParams { - return &PutStatusProbeParams{ - HTTPClient: client, - } -} - -/* -PutStatusProbeParams contains all the parameters to send to the API endpoint - - for the put status probe operation. - - Typically these are written to a http.Request. -*/ -type PutStatusProbeParams struct { - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the put status probe params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *PutStatusProbeParams) WithDefaults() *PutStatusProbeParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the put status probe params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *PutStatusProbeParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the put status probe params -func (o *PutStatusProbeParams) WithTimeout(timeout time.Duration) *PutStatusProbeParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the put status probe params -func (o *PutStatusProbeParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the put status probe params -func (o *PutStatusProbeParams) WithContext(ctx context.Context) *PutStatusProbeParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the put status probe params -func (o *PutStatusProbeParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the put status probe params -func (o *PutStatusProbeParams) WithHTTPClient(client *http.Client) *PutStatusProbeParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the put status probe params -func (o *PutStatusProbeParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WriteToRequest writes these params to a swagger request -func (o *PutStatusProbeParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/health/client/connectivity/put_status_probe_responses.go b/vendor/github.com/cilium/cilium/api/v1/health/client/connectivity/put_status_probe_responses.go deleted file mode 100644 index b69a43b3c..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/health/client/connectivity/put_status_probe_responses.go +++ /dev/null @@ -1,168 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package connectivity - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/health/models" -) - -// PutStatusProbeReader is a Reader for the PutStatusProbe structure. -type PutStatusProbeReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. -func (o *PutStatusProbeReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewPutStatusProbeOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 500: - result := NewPutStatusProbeFailed() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewPutStatusProbeOK creates a PutStatusProbeOK with default headers values -func NewPutStatusProbeOK() *PutStatusProbeOK { - return &PutStatusProbeOK{} -} - -/* -PutStatusProbeOK describes a response with status code 200, with default header values. 
- -Success -*/ -type PutStatusProbeOK struct { - Payload *models.HealthStatusResponse -} - -// IsSuccess returns true when this put status probe o k response has a 2xx status code -func (o *PutStatusProbeOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this put status probe o k response has a 3xx status code -func (o *PutStatusProbeOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this put status probe o k response has a 4xx status code -func (o *PutStatusProbeOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this put status probe o k response has a 5xx status code -func (o *PutStatusProbeOK) IsServerError() bool { - return false -} - -// IsCode returns true when this put status probe o k response a status code equal to that given -func (o *PutStatusProbeOK) IsCode(code int) bool { - return code == 200 -} - -func (o *PutStatusProbeOK) Error() string { - return fmt.Sprintf("[PUT /status/probe][%d] putStatusProbeOK %+v", 200, o.Payload) -} - -func (o *PutStatusProbeOK) String() string { - return fmt.Sprintf("[PUT /status/probe][%d] putStatusProbeOK %+v", 200, o.Payload) -} - -func (o *PutStatusProbeOK) GetPayload() *models.HealthStatusResponse { - return o.Payload -} - -func (o *PutStatusProbeOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - o.Payload = new(models.HealthStatusResponse) - - // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewPutStatusProbeFailed creates a PutStatusProbeFailed with default headers values -func NewPutStatusProbeFailed() *PutStatusProbeFailed { - return &PutStatusProbeFailed{} -} - -/* -PutStatusProbeFailed describes a response with status code 500, with default header values. 
- -Internal error occurred while conducting connectivity probe -*/ -type PutStatusProbeFailed struct { - Payload models.Error -} - -// IsSuccess returns true when this put status probe failed response has a 2xx status code -func (o *PutStatusProbeFailed) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this put status probe failed response has a 3xx status code -func (o *PutStatusProbeFailed) IsRedirect() bool { - return false -} - -// IsClientError returns true when this put status probe failed response has a 4xx status code -func (o *PutStatusProbeFailed) IsClientError() bool { - return false -} - -// IsServerError returns true when this put status probe failed response has a 5xx status code -func (o *PutStatusProbeFailed) IsServerError() bool { - return true -} - -// IsCode returns true when this put status probe failed response a status code equal to that given -func (o *PutStatusProbeFailed) IsCode(code int) bool { - return code == 500 -} - -func (o *PutStatusProbeFailed) Error() string { - return fmt.Sprintf("[PUT /status/probe][%d] putStatusProbeFailed %+v", 500, o.Payload) -} - -func (o *PutStatusProbeFailed) String() string { - return fmt.Sprintf("[PUT /status/probe][%d] putStatusProbeFailed %+v", 500, o.Payload) -} - -func (o *PutStatusProbeFailed) GetPayload() models.Error { - return o.Payload -} - -func (o *PutStatusProbeFailed) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/health/client/restapi/get_healthz_parameters.go b/vendor/github.com/cilium/cilium/api/v1/health/client/restapi/get_healthz_parameters.go deleted file mode 100644 index 3eb0ce8c8..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/health/client/restapi/get_healthz_parameters.go +++ /dev/null @@ -1,131 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package restapi - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "net/http" - "time" - - "github.com/go-openapi/errors" - "github.com/go-openapi/runtime" - cr "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" -) - -// NewGetHealthzParams creates a new GetHealthzParams object, -// with the default timeout for this client. -// -// Default values are not hydrated, since defaults are normally applied by the API server side. -// -// To enforce default values in parameter, use SetDefaults or WithDefaults. -func NewGetHealthzParams() *GetHealthzParams { - return &GetHealthzParams{ - timeout: cr.DefaultTimeout, - } -} - -// NewGetHealthzParamsWithTimeout creates a new GetHealthzParams object -// with the ability to set a timeout on a request. -func NewGetHealthzParamsWithTimeout(timeout time.Duration) *GetHealthzParams { - return &GetHealthzParams{ - timeout: timeout, - } -} - -// NewGetHealthzParamsWithContext creates a new GetHealthzParams object -// with the ability to set a context for a request. -func NewGetHealthzParamsWithContext(ctx context.Context) *GetHealthzParams { - return &GetHealthzParams{ - Context: ctx, - } -} - -// NewGetHealthzParamsWithHTTPClient creates a new GetHealthzParams object -// with the ability to set a custom HTTPClient for a request. 
-func NewGetHealthzParamsWithHTTPClient(client *http.Client) *GetHealthzParams { - return &GetHealthzParams{ - HTTPClient: client, - } -} - -/* -GetHealthzParams contains all the parameters to send to the API endpoint - - for the get healthz operation. - - Typically these are written to a http.Request. -*/ -type GetHealthzParams struct { - timeout time.Duration - Context context.Context - HTTPClient *http.Client -} - -// WithDefaults hydrates default values in the get healthz params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetHealthzParams) WithDefaults() *GetHealthzParams { - o.SetDefaults() - return o -} - -// SetDefaults hydrates default values in the get healthz params (not the query body). -// -// All values with no default are reset to their zero value. -func (o *GetHealthzParams) SetDefaults() { - // no default values defined for this parameter -} - -// WithTimeout adds the timeout to the get healthz params -func (o *GetHealthzParams) WithTimeout(timeout time.Duration) *GetHealthzParams { - o.SetTimeout(timeout) - return o -} - -// SetTimeout adds the timeout to the get healthz params -func (o *GetHealthzParams) SetTimeout(timeout time.Duration) { - o.timeout = timeout -} - -// WithContext adds the context to the get healthz params -func (o *GetHealthzParams) WithContext(ctx context.Context) *GetHealthzParams { - o.SetContext(ctx) - return o -} - -// SetContext adds the context to the get healthz params -func (o *GetHealthzParams) SetContext(ctx context.Context) { - o.Context = ctx -} - -// WithHTTPClient adds the HTTPClient to the get healthz params -func (o *GetHealthzParams) WithHTTPClient(client *http.Client) *GetHealthzParams { - o.SetHTTPClient(client) - return o -} - -// SetHTTPClient adds the HTTPClient to the get healthz params -func (o *GetHealthzParams) SetHTTPClient(client *http.Client) { - o.HTTPClient = client -} - -// WriteToRequest writes these params to a swagger request -func (o *GetHealthzParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { - - if err := r.SetTimeout(o.timeout); err != nil { - return err - } - var res []error - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/health/client/restapi/get_healthz_responses.go b/vendor/github.com/cilium/cilium/api/v1/health/client/restapi/get_healthz_responses.go deleted file mode 100644 index c30a21186..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/health/client/restapi/get_healthz_responses.go +++ /dev/null @@ -1,168 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package restapi - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - "io" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - - "github.com/cilium/cilium/api/v1/health/models" -) - -// GetHealthzReader is a Reader for the GetHealthz structure. -type GetHealthzReader struct { - formats strfmt.Registry -} - -// ReadResponse reads a server response into the received o. 
-func (o *GetHealthzReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - switch response.Code() { - case 200: - result := NewGetHealthzOK() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return result, nil - case 500: - result := NewGetHealthzFailed() - if err := result.readResponse(response, consumer, o.formats); err != nil { - return nil, err - } - return nil, result - default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) - } -} - -// NewGetHealthzOK creates a GetHealthzOK with default headers values -func NewGetHealthzOK() *GetHealthzOK { - return &GetHealthzOK{} -} - -/* -GetHealthzOK describes a response with status code 200, with default header values. - -Success -*/ -type GetHealthzOK struct { - Payload *models.HealthResponse -} - -// IsSuccess returns true when this get healthz o k response has a 2xx status code -func (o *GetHealthzOK) IsSuccess() bool { - return true -} - -// IsRedirect returns true when this get healthz o k response has a 3xx status code -func (o *GetHealthzOK) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get healthz o k response has a 4xx status code -func (o *GetHealthzOK) IsClientError() bool { - return false -} - -// IsServerError returns true when this get healthz o k response has a 5xx status code -func (o *GetHealthzOK) IsServerError() bool { - return false -} - -// IsCode returns true when this get healthz o k response a status code equal to that given -func (o *GetHealthzOK) IsCode(code int) bool { - return code == 200 -} - -func (o *GetHealthzOK) Error() string { - return fmt.Sprintf("[GET /healthz][%d] getHealthzOK %+v", 200, o.Payload) -} - -func (o *GetHealthzOK) String() string { - return fmt.Sprintf("[GET /healthz][%d] getHealthzOK %+v", 200, o.Payload) -} - -func (o *GetHealthzOK) GetPayload() *models.HealthResponse { - return o.Payload -} - -func (o *GetHealthzOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - o.Payload = new(models.HealthResponse) - - // response payload - if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} - -// NewGetHealthzFailed creates a GetHealthzFailed with default headers values -func NewGetHealthzFailed() *GetHealthzFailed { - return &GetHealthzFailed{} -} - -/* -GetHealthzFailed describes a response with status code 500, with default header values. 
- -Failed to contact local Cilium daemon -*/ -type GetHealthzFailed struct { - Payload models.Error -} - -// IsSuccess returns true when this get healthz failed response has a 2xx status code -func (o *GetHealthzFailed) IsSuccess() bool { - return false -} - -// IsRedirect returns true when this get healthz failed response has a 3xx status code -func (o *GetHealthzFailed) IsRedirect() bool { - return false -} - -// IsClientError returns true when this get healthz failed response has a 4xx status code -func (o *GetHealthzFailed) IsClientError() bool { - return false -} - -// IsServerError returns true when this get healthz failed response has a 5xx status code -func (o *GetHealthzFailed) IsServerError() bool { - return true -} - -// IsCode returns true when this get healthz failed response a status code equal to that given -func (o *GetHealthzFailed) IsCode(code int) bool { - return code == 500 -} - -func (o *GetHealthzFailed) Error() string { - return fmt.Sprintf("[GET /healthz][%d] getHealthzFailed %+v", 500, o.Payload) -} - -func (o *GetHealthzFailed) String() string { - return fmt.Sprintf("[GET /healthz][%d] getHealthzFailed %+v", 500, o.Payload) -} - -func (o *GetHealthzFailed) GetPayload() models.Error { - return o.Payload -} - -func (o *GetHealthzFailed) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { - - // response payload - if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { - return err - } - - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/health/client/restapi/restapi_client.go b/vendor/github.com/cilium/cilium/api/v1/health/client/restapi/restapi_client.go deleted file mode 100644 index 6e5b1614a..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/health/client/restapi/restapi_client.go +++ /dev/null @@ -1,87 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package restapi - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "fmt" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" -) - -// New creates a new restapi API client. -func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService { - return &Client{transport: transport, formats: formats} -} - -/* -Client for restapi API -*/ -type Client struct { - transport runtime.ClientTransport - formats strfmt.Registry -} - -// ClientOption is the option for Client methods -type ClientOption func(*runtime.ClientOperation) - -// ClientService is the interface for Client methods -type ClientService interface { - GetHealthz(params *GetHealthzParams, opts ...ClientOption) (*GetHealthzOK, error) - - SetTransport(transport runtime.ClientTransport) -} - -/* - GetHealthz gets health of cilium node - - Returns health and status information of the local node including - -load and uptime, as well as the status of related components including -the Cilium daemon. 
-*/ -func (a *Client) GetHealthz(params *GetHealthzParams, opts ...ClientOption) (*GetHealthzOK, error) { - // TODO: Validate the params before sending - if params == nil { - params = NewGetHealthzParams() - } - op := &runtime.ClientOperation{ - ID: "GetHealthz", - Method: "GET", - PathPattern: "/healthz", - ProducesMediaTypes: []string{"application/json"}, - ConsumesMediaTypes: []string{"application/json"}, - Schemes: []string{"http"}, - Params: params, - Reader: &GetHealthzReader{formats: a.formats}, - Context: params.Context, - Client: params.HTTPClient, - } - for _, opt := range opts { - opt(op) - } - - result, err := a.transport.Submit(op) - if err != nil { - return nil, err - } - success, ok := result.(*GetHealthzOK) - if ok { - return success, nil - } - // unexpected success response - // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue - msg := fmt.Sprintf("unexpected success response for GetHealthz: API contract not enforced by server. Client expected to get an error, but got: %T", result) - panic(msg) -} - -// SetTransport changes the transport on the client -func (a *Client) SetTransport(transport runtime.ClientTransport) { - a.transport = transport -} diff --git a/vendor/github.com/cilium/cilium/api/v1/health/models/connectivity_status.go b/vendor/github.com/cilium/cilium/api/v1/health/models/connectivity_status.go deleted file mode 100644 index 123e3b416..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/health/models/connectivity_status.go +++ /dev/null @@ -1,56 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" -) - -// ConnectivityStatus Connectivity status of a path -// -// swagger:model ConnectivityStatus -type ConnectivityStatus struct { - - // Round trip time to node in nanoseconds - Latency int64 `json:"latency,omitempty"` - - // Human readable status/error/warning message - Status string `json:"status,omitempty"` -} - -// Validate validates this connectivity status -func (m *ConnectivityStatus) Validate(formats strfmt.Registry) error { - return nil -} - -// ContextValidate validates this connectivity status based on context it is used -func (m *ConnectivityStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *ConnectivityStatus) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *ConnectivityStatus) UnmarshalBinary(b []byte) error { - var res ConnectivityStatus - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/health/models/endpoint_status.go b/vendor/github.com/cilium/cilium/api/v1/health/models/endpoint_status.go deleted file mode 100644 index 4b8c9daa2..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/health/models/endpoint_status.go +++ /dev/null @@ -1,165 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "strconv" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" -) - -// EndpointStatus Connectivity status to host cilium-health endpoints via different paths -// -// swagger:model EndpointStatus -type EndpointStatus struct { - - // primary address - PrimaryAddress *PathStatus `json:"primary-address,omitempty"` - - // secondary addresses - SecondaryAddresses []*PathStatus `json:"secondary-addresses"` -} - -// Validate validates this endpoint status -func (m *EndpointStatus) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validatePrimaryAddress(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSecondaryAddresses(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *EndpointStatus) validatePrimaryAddress(formats strfmt.Registry) error { - if swag.IsZero(m.PrimaryAddress) { // not required - return nil - } - - if m.PrimaryAddress != nil { - if err := m.PrimaryAddress.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("primary-address") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("primary-address") - } - return err - } - } - - return nil -} - -func (m *EndpointStatus) validateSecondaryAddresses(formats strfmt.Registry) error { - if swag.IsZero(m.SecondaryAddresses) { // not required - return nil - } - - for i := 0; i < len(m.SecondaryAddresses); i++ { - if swag.IsZero(m.SecondaryAddresses[i]) { // not required - continue - } - - if m.SecondaryAddresses[i] != nil { - if err := m.SecondaryAddresses[i].Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("secondary-addresses" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("secondary-addresses" + "." + strconv.Itoa(i)) - } - return err - } - } - - } - - return nil -} - -// ContextValidate validate this endpoint status based on the context it is used -func (m *EndpointStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidatePrimaryAddress(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidateSecondaryAddresses(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *EndpointStatus) contextValidatePrimaryAddress(ctx context.Context, formats strfmt.Registry) error { - - if m.PrimaryAddress != nil { - if err := m.PrimaryAddress.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("primary-address") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("primary-address") - } - return err - } - } - - return nil -} - -func (m *EndpointStatus) contextValidateSecondaryAddresses(ctx context.Context, formats strfmt.Registry) error { - - for i := 0; i < len(m.SecondaryAddresses); i++ { - - if m.SecondaryAddresses[i] != nil { - if err := m.SecondaryAddresses[i].ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("secondary-addresses" + "." 
+ strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("secondary-addresses" + "." + strconv.Itoa(i)) - } - return err - } - } - - } - - return nil -} - -// MarshalBinary interface implementation -func (m *EndpointStatus) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *EndpointStatus) UnmarshalBinary(b []byte) error { - var res EndpointStatus - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/health/models/error.go b/vendor/github.com/cilium/cilium/api/v1/health/models/error.go deleted file mode 100644 index 83eccc860..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/health/models/error.go +++ /dev/null @@ -1,30 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - - "github.com/go-openapi/strfmt" -) - -// Error error -// -// swagger:model error -type Error string - -// Validate validates this error -func (m Error) Validate(formats strfmt.Registry) error { - return nil -} - -// ContextValidate validates this error based on context it is used -func (m Error) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/health/models/health_response.go b/vendor/github.com/cilium/cilium/api/v1/health/models/health_response.go deleted file mode 100644 index d74c7044e..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/health/models/health_response.go +++ /dev/null @@ -1,153 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - - ciliumModels "github.com/cilium/cilium/api/v1/models" - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" -) - -// HealthResponse Health and status information of local node -// -// swagger:model HealthResponse -type HealthResponse struct { - - // Status of Cilium daemon - Cilium ciliumModels.StatusResponse `json:"cilium,omitempty"` - - // System load on node - SystemLoad *LoadResponse `json:"system-load,omitempty"` - - // Uptime of cilium-health instance - Uptime string `json:"uptime,omitempty"` -} - -// Validate validates this health response -func (m *HealthResponse) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateCilium(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSystemLoad(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *HealthResponse) validateCilium(formats strfmt.Registry) error { - if swag.IsZero(m.Cilium) { // not required - return nil - } - - if err := m.Cilium.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("cilium") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("cilium") - } - return err - } - - return nil -} - -func (m *HealthResponse) validateSystemLoad(formats strfmt.Registry) error { - if swag.IsZero(m.SystemLoad) { // not required - return nil - } - - if m.SystemLoad != nil { - if err := m.SystemLoad.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("system-load") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("system-load") - } - return err - } - } - - return nil -} - -// ContextValidate validate this health response based on the context it is used -func (m *HealthResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateCilium(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidateSystemLoad(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *HealthResponse) contextValidateCilium(ctx context.Context, formats strfmt.Registry) error { - - if err := m.Cilium.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("cilium") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("cilium") - } - return err - } - - return nil -} - -func (m *HealthResponse) contextValidateSystemLoad(ctx context.Context, formats strfmt.Registry) error { - - if m.SystemLoad != nil { - if err := m.SystemLoad.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("system-load") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("system-load") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *HealthResponse) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *HealthResponse) UnmarshalBinary(b []byte) error { - var res HealthResponse - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/health/models/health_status_response.go b/vendor/github.com/cilium/cilium/api/v1/health/models/health_status_response.go deleted file mode 100644 index 9f4a29d78..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/health/models/health_status_response.go +++ /dev/null @@ -1,168 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "strconv" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" -) - -// HealthStatusResponse Connectivity status to other daemons -// -// swagger:model HealthStatusResponse -type HealthStatusResponse struct { - - // Description of the local node - Local *SelfStatus `json:"local,omitempty"` - - // Connectivity status to each other node - Nodes []*NodeStatus `json:"nodes"` - - // timestamp - Timestamp string `json:"timestamp,omitempty"` -} - -// Validate validates this health status response -func (m *HealthStatusResponse) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateLocal(formats); err != nil { - res = append(res, err) - } - - if err := m.validateNodes(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *HealthStatusResponse) validateLocal(formats strfmt.Registry) error { - if swag.IsZero(m.Local) { // not required - return nil - } - - if m.Local != nil { - if err := m.Local.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("local") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("local") - } - return err - } - } - - return nil -} - -func (m *HealthStatusResponse) validateNodes(formats strfmt.Registry) error { - if swag.IsZero(m.Nodes) { // not required - return nil - } - - for i := 0; i < len(m.Nodes); i++ { - if swag.IsZero(m.Nodes[i]) { // not required - continue - } - - if m.Nodes[i] != nil { - if err := m.Nodes[i].Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("nodes" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("nodes" + "." + strconv.Itoa(i)) - } - return err - } - } - - } - - return nil -} - -// ContextValidate validate this health status response based on the context it is used -func (m *HealthStatusResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateLocal(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidateNodes(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *HealthStatusResponse) contextValidateLocal(ctx context.Context, formats strfmt.Registry) error { - - if m.Local != nil { - if err := m.Local.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("local") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("local") - } - return err - } - } - - return nil -} - -func (m *HealthStatusResponse) contextValidateNodes(ctx context.Context, formats strfmt.Registry) error { - - for i := 0; i < len(m.Nodes); i++ { - - if m.Nodes[i] != nil { - if err := m.Nodes[i].ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("nodes" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("nodes" + "." 
+ strconv.Itoa(i)) - } - return err - } - } - - } - - return nil -} - -// MarshalBinary interface implementation -func (m *HealthStatusResponse) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *HealthStatusResponse) UnmarshalBinary(b []byte) error { - var res HealthStatusResponse - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/health/models/host_status.go b/vendor/github.com/cilium/cilium/api/v1/health/models/host_status.go deleted file mode 100644 index da114cf19..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/health/models/host_status.go +++ /dev/null @@ -1,166 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - "strconv" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" -) - -// HostStatus Connectivity status to host cilium-health instance via different paths, -// probing via all known IP addresses -// -// swagger:model HostStatus -type HostStatus struct { - - // primary address - PrimaryAddress *PathStatus `json:"primary-address,omitempty"` - - // secondary addresses - SecondaryAddresses []*PathStatus `json:"secondary-addresses"` -} - -// Validate validates this host status -func (m *HostStatus) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validatePrimaryAddress(formats); err != nil { - res = append(res, err) - } - - if err := m.validateSecondaryAddresses(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *HostStatus) validatePrimaryAddress(formats strfmt.Registry) error { - if swag.IsZero(m.PrimaryAddress) { // not required - return nil - } - - if m.PrimaryAddress != nil { - if err := m.PrimaryAddress.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("primary-address") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("primary-address") - } - return err - } - } - - return nil -} - -func (m *HostStatus) validateSecondaryAddresses(formats strfmt.Registry) error { - if swag.IsZero(m.SecondaryAddresses) { // not required - return nil - } - - for i := 0; i < len(m.SecondaryAddresses); i++ { - if swag.IsZero(m.SecondaryAddresses[i]) { // not required - continue - } - - if m.SecondaryAddresses[i] != nil { - if err := m.SecondaryAddresses[i].Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("secondary-addresses" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("secondary-addresses" + "." 
+ strconv.Itoa(i)) - } - return err - } - } - - } - - return nil -} - -// ContextValidate validate this host status based on the context it is used -func (m *HostStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidatePrimaryAddress(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidateSecondaryAddresses(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *HostStatus) contextValidatePrimaryAddress(ctx context.Context, formats strfmt.Registry) error { - - if m.PrimaryAddress != nil { - if err := m.PrimaryAddress.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("primary-address") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("primary-address") - } - return err - } - } - - return nil -} - -func (m *HostStatus) contextValidateSecondaryAddresses(ctx context.Context, formats strfmt.Registry) error { - - for i := 0; i < len(m.SecondaryAddresses); i++ { - - if m.SecondaryAddresses[i] != nil { - if err := m.SecondaryAddresses[i].ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("secondary-addresses" + "." + strconv.Itoa(i)) - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("secondary-addresses" + "." + strconv.Itoa(i)) - } - return err - } - } - - } - - return nil -} - -// MarshalBinary interface implementation -func (m *HostStatus) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *HostStatus) UnmarshalBinary(b []byte) error { - var res HostStatus - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/health/models/load_response.go b/vendor/github.com/cilium/cilium/api/v1/health/models/load_response.go deleted file mode 100644 index d0ceddba7..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/health/models/load_response.go +++ /dev/null @@ -1,59 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" -) - -// LoadResponse System load on node -// -// swagger:model LoadResponse -type LoadResponse struct { - - // Load average over the past 15 minutes - Last15min string `json:"last15min,omitempty"` - - // Load average over the past minute - Last1min string `json:"last1min,omitempty"` - - // Load average over the past 5 minutes - Last5min string `json:"last5min,omitempty"` -} - -// Validate validates this load response -func (m *LoadResponse) Validate(formats strfmt.Registry) error { - return nil -} - -// ContextValidate validates this load response based on context it is used -func (m *LoadResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *LoadResponse) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *LoadResponse) UnmarshalBinary(b []byte) error { - var res LoadResponse - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/health/models/node_status.go b/vendor/github.com/cilium/cilium/api/v1/health/models/node_status.go deleted file mode 100644 index 1bbd99533..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/health/models/node_status.go +++ /dev/null @@ -1,204 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" -) - -// NodeStatus Connectivity status of a remote cilium-health instance -// -// swagger:model NodeStatus -type NodeStatus struct { - - // DEPRECATED: Please use the health-endpoint field instead, which - // supports reporting the status of different addresses for the endpoint - // - Endpoint *PathStatus `json:"endpoint,omitempty"` - - // Connectivity status to simulated endpoint on the node - HealthEndpoint *EndpointStatus `json:"health-endpoint,omitempty"` - - // Connectivity status to cilium-health instance on node IP - Host *HostStatus `json:"host,omitempty"` - - // Identifying name for the node - Name string `json:"name,omitempty"` -} - -// Validate validates this node status -func (m *NodeStatus) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateEndpoint(formats); err != nil { - res = append(res, err) - } - - if err := m.validateHealthEndpoint(formats); err != nil { - res = append(res, err) - } - - if err := m.validateHost(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *NodeStatus) validateEndpoint(formats strfmt.Registry) error { - if swag.IsZero(m.Endpoint) { // not required - return nil - } - - if m.Endpoint != nil { - if err := m.Endpoint.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("endpoint") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("endpoint") - } - return err - } - } - - return nil -} - -func (m *NodeStatus) validateHealthEndpoint(formats strfmt.Registry) error { - if swag.IsZero(m.HealthEndpoint) { // not required - return nil - } - - if m.HealthEndpoint != nil { - if err := m.HealthEndpoint.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("health-endpoint") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("health-endpoint") - } - return err - } - } - - return nil -} - -func (m *NodeStatus) validateHost(formats strfmt.Registry) error { - if swag.IsZero(m.Host) { // not required - return nil - } - - if m.Host != nil { - if err := m.Host.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("host") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("host") - } - return err - } - } - - return nil -} - -// ContextValidate validate this node status based on the context it is used -func (m *NodeStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateEndpoint(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidateHealthEndpoint(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidateHost(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *NodeStatus) contextValidateEndpoint(ctx context.Context, formats strfmt.Registry) error { - - if m.Endpoint != nil { - if err := m.Endpoint.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("endpoint") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("endpoint") - } - return err - } - } - - return nil -} - -func (m *NodeStatus) contextValidateHealthEndpoint(ctx context.Context, formats strfmt.Registry) error { - - if m.HealthEndpoint != nil { - if err := m.HealthEndpoint.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("health-endpoint") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("health-endpoint") - } - return err - } - } - - return nil -} - -func (m *NodeStatus) contextValidateHost(ctx context.Context, formats strfmt.Registry) error { - - if m.Host != nil { - if err := m.Host.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("host") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("host") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *NodeStatus) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *NodeStatus) UnmarshalBinary(b []byte) error { - var res NodeStatus - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/health/models/path_status.go b/vendor/github.com/cilium/cilium/api/v1/health/models/path_status.go deleted file mode 100644 index 3a8669f3c..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/health/models/path_status.go +++ /dev/null @@ -1,157 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package models - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - - "github.com/go-openapi/errors" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" -) - -// PathStatus Connectivity status via different paths, for example using different -// policies or service redirection -// -// swagger:model PathStatus -type PathStatus struct { - - // Connectivity status without policy applied - HTTP *ConnectivityStatus `json:"http,omitempty"` - - // Basic ping connectivity status to node IP - Icmp *ConnectivityStatus `json:"icmp,omitempty"` - - // IP address queried for the connectivity status - IP string `json:"ip,omitempty"` -} - -// Validate validates this path status -func (m *PathStatus) Validate(formats strfmt.Registry) error { - var res []error - - if err := m.validateHTTP(formats); err != nil { - res = append(res, err) - } - - if err := m.validateIcmp(formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) 
- } - return nil -} - -func (m *PathStatus) validateHTTP(formats strfmt.Registry) error { - if swag.IsZero(m.HTTP) { // not required - return nil - } - - if m.HTTP != nil { - if err := m.HTTP.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("http") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("http") - } - return err - } - } - - return nil -} - -func (m *PathStatus) validateIcmp(formats strfmt.Registry) error { - if swag.IsZero(m.Icmp) { // not required - return nil - } - - if m.Icmp != nil { - if err := m.Icmp.Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("icmp") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("icmp") - } - return err - } - } - - return nil -} - -// ContextValidate validate this path status based on the context it is used -func (m *PathStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - var res []error - - if err := m.contextValidateHTTP(ctx, formats); err != nil { - res = append(res, err) - } - - if err := m.contextValidateIcmp(ctx, formats); err != nil { - res = append(res, err) - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -func (m *PathStatus) contextValidateHTTP(ctx context.Context, formats strfmt.Registry) error { - - if m.HTTP != nil { - if err := m.HTTP.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("http") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("http") - } - return err - } - } - - return nil -} - -func (m *PathStatus) contextValidateIcmp(ctx context.Context, formats strfmt.Registry) error { - - if m.Icmp != nil { - if err := m.Icmp.ContextValidate(ctx, formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("icmp") - } else if ce, ok := err.(*errors.CompositeError); ok { - return ce.ValidateName("icmp") - } - return err - } - } - - return nil -} - -// MarshalBinary interface implementation -func (m *PathStatus) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *PathStatus) UnmarshalBinary(b []byte) error { - var res PathStatus - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/health/models/self_status.go b/vendor/github.com/cilium/cilium/api/v1/health/models/self_status.go deleted file mode 100644 index 0f8609367..000000000 --- a/vendor/github.com/cilium/cilium/api/v1/health/models/self_status.go +++ /dev/null @@ -1,53 +0,0 @@ -// Code generated by go-swagger; DO NOT EDIT. - -// Copyright Authors of Cilium -// SPDX-License-Identifier: Apache-2.0 - -package models - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -import ( - "context" - - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" -) - -// SelfStatus Description of the cilium-health node -// -// swagger:model SelfStatus -type SelfStatus struct { - - // Name associated with this node - Name string `json:"name,omitempty"` -} - -// Validate validates this self status -func (m *SelfStatus) Validate(formats strfmt.Registry) error { - return nil -} - -// ContextValidate validates this self status based on context it is used -func (m *SelfStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error { - return nil -} - -// MarshalBinary interface implementation -func (m *SelfStatus) MarshalBinary() ([]byte, error) { - if m == nil { - return nil, nil - } - return swag.WriteJSON(m) -} - -// UnmarshalBinary interface implementation -func (m *SelfStatus) UnmarshalBinary(b []byte) error { - var res SelfStatus - if err := swag.ReadJSON(b, &res); err != nil { - return err - } - *m = res - return nil -} diff --git a/vendor/github.com/cilium/cilium/api/v1/models/address_pair.go b/vendor/github.com/cilium/cilium/api/v1/models/address_pair.go index 4b20ef515..302dce71c 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/address_pair.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/address_pair.go @@ -26,11 +26,17 @@ type AddressPair struct { // UUID of IPv4 expiration timer IPV4ExpirationUUID string `json:"ipv4-expiration-uuid,omitempty"` + // IPAM pool from which this IPv4 address was allocated + IPV4PoolName string `json:"ipv4-pool-name,omitempty"` + // IPv6 address IPV6 string `json:"ipv6,omitempty"` // UUID of IPv6 expiration timer IPV6ExpirationUUID string `json:"ipv6-expiration-uuid,omitempty"` + + // IPAM pool from which this IPv6 address was allocated + IPV6PoolName string `json:"ipv6-pool-name,omitempty"` } // Validate validates this address pair diff --git a/vendor/github.com/cilium/cilium/api/v1/models/bgp_graceful_restart.go b/vendor/github.com/cilium/cilium/api/v1/models/bgp_graceful_restart.go new file mode 100644 index 000000000..d24845f9f --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/models/bgp_graceful_restart.go @@ -0,0 +1,61 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// BgpGracefulRestart BGP graceful restart parameters negotiated with the peer. +// +// +k8s:deepcopy-gen=true +// +// swagger:model BgpGracefulRestart +type BgpGracefulRestart struct { + + // When set, graceful restart capability is negotiated for all AFI/SAFIs of + // this peer. + Enabled bool `json:"enabled,omitempty"` + + // This is the time advertised to peer for the BGP session to be re-established + // after a restart. After this period, peer will remove stale routes. 
+ // (RFC 4724 section 4.2) + RestartTimeSeconds int64 `json:"restart-time-seconds,omitempty"` +} + +// Validate validates this bgp graceful restart +func (m *BgpGracefulRestart) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this bgp graceful restart based on context it is used +func (m *BgpGracefulRestart) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *BgpGracefulRestart) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BgpGracefulRestart) UnmarshalBinary(b []byte) error { + var res BgpGracefulRestart + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/models/bgp_peer.go b/vendor/github.com/cilium/cilium/api/v1/models/bgp_peer.go new file mode 100644 index 000000000..b98bbd495 --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/models/bgp_peer.go @@ -0,0 +1,238 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// BgpPeer State of a BGP Peer +// +// +k8s:deepcopy-gen=true +// +// swagger:model BgpPeer +type BgpPeer struct { + + // Applied initial value for the BGP HoldTimer (RFC 4271, Section 4.2) in seconds. + // The applied value holds the value that is in effect on the current BGP session. + // + AppliedHoldTimeSeconds int64 `json:"applied-hold-time-seconds,omitempty"` + + // Applied initial value for the BGP KeepaliveTimer (RFC 4271, Section 8) in seconds. + // The applied value holds the value that is in effect on the current BGP session. + // + AppliedKeepAliveTimeSeconds int64 `json:"applied-keep-alive-time-seconds,omitempty"` + + // Configured initial value for the BGP HoldTimer (RFC 4271, Section 4.2) in seconds. + // The configured value will be used for negotiation with the peer during the BGP session establishment. + // + ConfiguredHoldTimeSeconds int64 `json:"configured-hold-time-seconds,omitempty"` + + // Configured initial value for the BGP KeepaliveTimer (RFC 4271, Section 8) in seconds. + // The applied value may be different than the configured value, as it depends on the negotiated hold time interval. + // + ConfiguredKeepAliveTimeSeconds int64 `json:"configured-keep-alive-time-seconds,omitempty"` + + // Initial value for the BGP ConnectRetryTimer (RFC 4271, Section 8) in seconds + ConnectRetryTimeSeconds int64 `json:"connect-retry-time-seconds,omitempty"` + + // Time To Live (TTL) value used in BGP packets sent to the eBGP neighbor. + // 0 if eBGP multi-hop feature is disabled. 
+ // + EbgpMultihopTTL int64 `json:"ebgp-multihop-ttl,omitempty"` + + // BGP peer address family state + Families []*BgpPeerFamilies `json:"families"` + + // Graceful restart capability + GracefulRestart *BgpGracefulRestart `json:"graceful-restart,omitempty"` + + // Local AS Number + LocalAsn int64 `json:"local-asn,omitempty"` + + // IP Address of peer + PeerAddress string `json:"peer-address,omitempty"` + + // Peer AS Number + PeerAsn int64 `json:"peer-asn,omitempty"` + + // TCP port number of peer + // Maximum: 65535 + // Minimum: 1 + PeerPort int64 `json:"peer-port,omitempty"` + + // BGP peer operational state as described here + // https://www.rfc-editor.org/rfc/rfc4271#section-8.2.2 + // + SessionState string `json:"session-state,omitempty"` + + // BGP peer connection uptime in nano seconds. + UptimeNanoseconds int64 `json:"uptime-nanoseconds,omitempty"` +} + +// Validate validates this bgp peer +func (m *BgpPeer) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateFamilies(formats); err != nil { + res = append(res, err) + } + + if err := m.validateGracefulRestart(formats); err != nil { + res = append(res, err) + } + + if err := m.validatePeerPort(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *BgpPeer) validateFamilies(formats strfmt.Registry) error { + if swag.IsZero(m.Families) { // not required + return nil + } + + for i := 0; i < len(m.Families); i++ { + if swag.IsZero(m.Families[i]) { // not required + continue + } + + if m.Families[i] != nil { + if err := m.Families[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("families" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("families" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *BgpPeer) validateGracefulRestart(formats strfmt.Registry) error { + if swag.IsZero(m.GracefulRestart) { // not required + return nil + } + + if m.GracefulRestart != nil { + if err := m.GracefulRestart.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("graceful-restart") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("graceful-restart") + } + return err + } + } + + return nil +} + +func (m *BgpPeer) validatePeerPort(formats strfmt.Registry) error { + if swag.IsZero(m.PeerPort) { // not required + return nil + } + + if err := validate.MinimumInt("peer-port", "body", m.PeerPort, 1, false); err != nil { + return err + } + + if err := validate.MaximumInt("peer-port", "body", m.PeerPort, 65535, false); err != nil { + return err + } + + return nil +} + +// ContextValidate validate this bgp peer based on the context it is used +func (m *BgpPeer) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + var res []error + + if err := m.contextValidateFamilies(ctx, formats); err != nil { + res = append(res, err) + } + + if err := m.contextValidateGracefulRestart(ctx, formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *BgpPeer) contextValidateFamilies(ctx context.Context, formats strfmt.Registry) error { + + for i := 0; i < len(m.Families); i++ { + + if m.Families[i] != nil { + if err := m.Families[i].ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("families" + "." + strconv.Itoa(i)) + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("families" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *BgpPeer) contextValidateGracefulRestart(ctx context.Context, formats strfmt.Registry) error { + + if m.GracefulRestart != nil { + if err := m.GracefulRestart.ContextValidate(ctx, formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("graceful-restart") + } else if ce, ok := err.(*errors.CompositeError); ok { + return ce.ValidateName("graceful-restart") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *BgpPeer) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BgpPeer) UnmarshalBinary(b []byte) error { + var res BgpPeer + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/models/bgp_peer_families.go b/vendor/github.com/cilium/cilium/api/v1/models/bgp_peer_families.go new file mode 100644 index 000000000..aac6013ad --- /dev/null +++ b/vendor/github.com/cilium/cilium/api/v1/models/bgp_peer_families.go @@ -0,0 +1,67 @@ +// Code generated by go-swagger; DO NOT EDIT. + +// Copyright Authors of Cilium +// SPDX-License-Identifier: Apache-2.0 + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// BgpPeerFamilies BGP AFI SAFI state of the peer +// +// +k8s:deepcopy-gen=true +// +// swagger:model BgpPeerFamilies +type BgpPeerFamilies struct { + + // Number of routes accepted from the peer of this address family + Accepted int64 `json:"accepted,omitempty"` + + // Number of routes advertised of this address family to the peer + Advertised int64 `json:"advertised,omitempty"` + + // BGP address family indicator + Afi string `json:"afi,omitempty"` + + // Number of routes received from the peer of this address family + Received int64 `json:"received,omitempty"` + + // BGP subsequent address family indicator + Safi string `json:"safi,omitempty"` +} + +// Validate validates this bgp peer families +func (m *BgpPeerFamilies) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this bgp peer families based on context it is used +func (m *BgpPeerFamilies) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *BgpPeerFamilies) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *BgpPeerFamilies) UnmarshalBinary(b []byte) error { + var res BgpPeerFamilies + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/cilium/cilium/api/v1/models/kube_proxy_replacement.go b/vendor/github.com/cilium/cilium/api/v1/models/kube_proxy_replacement.go index 3fedfb0e4..7d60f3b10 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/kube_proxy_replacement.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/kube_proxy_replacement.go @@ -283,6 +283,9 @@ func (m *KubeProxyReplacementDeviceListItems0) UnmarshalBinary(b []byte) error { // swagger:model KubeProxyReplacementFeatures type KubeProxyReplacementFeatures struct { + // flag bpf-lb-sock-hostns-only + BpfSocketLBHostnsOnly bool `json:"bpfSocketLBHostnsOnly,omitempty"` + // external i ps ExternalIPs *KubeProxyReplacementFeaturesExternalIPs `json:"externalIPs,omitempty"` diff --git a/vendor/github.com/cilium/cilium/api/v1/models/proxy_status.go b/vendor/github.com/cilium/cilium/api/v1/models/proxy_status.go index 3597959d9..d3283687d 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/proxy_status.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/proxy_status.go @@ -10,11 +10,13 @@ package models import ( "context" + "encoding/json" "strconv" "github.com/go-openapi/errors" "github.com/go-openapi/strfmt" "github.com/go-openapi/swag" + "github.com/go-openapi/validate" ) // ProxyStatus Status of proxy @@ -24,6 +26,10 @@ import ( // swagger:model ProxyStatus type ProxyStatus struct { + // Deployment mode of Envoy L7 proxy + // Enum: [embedded external] + EnvoyDeploymentMode string `json:"envoy-deployment-mode,omitempty"` + // IP address that the proxy listens on IP string `json:"ip,omitempty"` @@ -44,6 +50,10 @@ type ProxyStatus struct { func (m *ProxyStatus) Validate(formats strfmt.Registry) error { var res []error + if err := m.validateEnvoyDeploymentMode(formats); err != nil { + res = append(res, err) + } + if err := m.validateRedirects(formats); err != nil { res = append(res, err) } @@ -54,6 +64,48 @@ func (m *ProxyStatus) Validate(formats strfmt.Registry) error { return 
nil } +var proxyStatusTypeEnvoyDeploymentModePropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["embedded","external"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + proxyStatusTypeEnvoyDeploymentModePropEnum = append(proxyStatusTypeEnvoyDeploymentModePropEnum, v) + } +} + +const ( + + // ProxyStatusEnvoyDeploymentModeEmbedded captures enum value "embedded" + ProxyStatusEnvoyDeploymentModeEmbedded string = "embedded" + + // ProxyStatusEnvoyDeploymentModeExternal captures enum value "external" + ProxyStatusEnvoyDeploymentModeExternal string = "external" +) + +// prop value enum +func (m *ProxyStatus) validateEnvoyDeploymentModeEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, proxyStatusTypeEnvoyDeploymentModePropEnum, true); err != nil { + return err + } + return nil +} + +func (m *ProxyStatus) validateEnvoyDeploymentMode(formats strfmt.Registry) error { + if swag.IsZero(m.EnvoyDeploymentMode) { // not required + return nil + } + + // value enum + if err := m.validateEnvoyDeploymentModeEnum("envoy-deployment-mode", "body", m.EnvoyDeploymentMode); err != nil { + return err + } + + return nil +} + func (m *ProxyStatus) validateRedirects(formats strfmt.Registry) error { if swag.IsZero(m.Redirects) { // not required return nil diff --git a/vendor/github.com/cilium/cilium/api/v1/models/service_spec.go b/vendor/github.com/cilium/cilium/api/v1/models/service_spec.go index d9595f5bd..eae7726a6 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/service_spec.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/service_spec.go @@ -227,6 +227,9 @@ func (m *ServiceSpec) UnmarshalBinary(b []byte) error { // swagger:model ServiceSpecFlags type ServiceSpecFlags struct { + // Service cluster + Cluster string `json:"cluster,omitempty"` + // Service external traffic policy // Enum: [Cluster Local] ExtTrafficPolicy string `json:"extTrafficPolicy,omitempty"` diff --git a/vendor/github.com/cilium/cilium/api/v1/models/wireguard_status.go b/vendor/github.com/cilium/cilium/api/v1/models/wireguard_status.go index ca6bfc345..98285fb5b 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/wireguard_status.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/wireguard_status.go @@ -26,6 +26,9 @@ type WireguardStatus struct { // Wireguard interfaces managed by this Cilium instance Interfaces []*WireguardInterface `json:"interfaces"` + + // Node Encryption status + NodeEncryption string `json:"node-encryption,omitempty"` } // Validate validates this wireguard status diff --git a/vendor/github.com/cilium/cilium/api/v1/models/zz_generated.deepcopy.go b/vendor/github.com/cilium/cilium/api/v1/models/zz_generated.deepcopy.go index a67ed0807..22939194d 100644 --- a/vendor/github.com/cilium/cilium/api/v1/models/zz_generated.deepcopy.go +++ b/vendor/github.com/cilium/cilium/api/v1/models/zz_generated.deepcopy.go @@ -60,6 +60,70 @@ func (in *BandwidthManager) DeepCopy() *BandwidthManager { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BgpGracefulRestart) DeepCopyInto(out *BgpGracefulRestart) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BgpGracefulRestart. 
+func (in *BgpGracefulRestart) DeepCopy() *BgpGracefulRestart { + if in == nil { + return nil + } + out := new(BgpGracefulRestart) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BgpPeer) DeepCopyInto(out *BgpPeer) { + *out = *in + if in.Families != nil { + in, out := &in.Families, &out.Families + *out = make([]*BgpPeerFamilies, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(BgpPeerFamilies) + **out = **in + } + } + } + if in.GracefulRestart != nil { + in, out := &in.GracefulRestart, &out.GracefulRestart + *out = new(BgpGracefulRestart) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BgpPeer. +func (in *BgpPeer) DeepCopy() *BgpPeer { + if in == nil { + return nil + } + out := new(BgpPeer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BgpPeerFamilies) DeepCopyInto(out *BgpPeerFamilies) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BgpPeerFamilies. +func (in *BgpPeerFamilies) DeepCopy() *BgpPeerFamilies { + if in == nil { + return nil + } + out := new(BgpPeerFamilies) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CIDRPolicy) DeepCopyInto(out *CIDRPolicy) { *out = *in diff --git a/vendor/github.com/cilium/cilium/api/v1/observer/README.md b/vendor/github.com/cilium/cilium/api/v1/observer/README.md index 582b2699c..2561f1f2a 100644 --- a/vendor/github.com/cilium/cilium/api/v1/observer/README.md +++ b/vendor/github.com/cilium/cilium/api/v1/observer/README.md @@ -10,9 +10,13 @@ - [GetDebugEventsRequest](#observer-GetDebugEventsRequest) - [GetDebugEventsResponse](#observer-GetDebugEventsResponse) - [GetFlowsRequest](#observer-GetFlowsRequest) + - [GetFlowsRequest.Experimental](#observer-GetFlowsRequest-Experimental) - [GetFlowsResponse](#observer-GetFlowsResponse) + - [GetNamespacesRequest](#observer-GetNamespacesRequest) + - [GetNamespacesResponse](#observer-GetNamespacesResponse) - [GetNodesRequest](#observer-GetNodesRequest) - [GetNodesResponse](#observer-GetNodesResponse) + - [Namespace](#observer-Namespace) - [Node](#observer-Node) - [ServerStatusRequest](#observer-ServerStatusRequest) - [ServerStatusResponse](#observer-ServerStatusResponse) @@ -140,6 +144,23 @@ GetDebugEventsResponse contains a Cilium datapath debug events. | whitelist | [flow.FlowFilter](#flow-FlowFilter) | repeated | whitelist defines a list of filters which have to match for a flow to be included in the result. If multiple whitelist filters are specified, only one of them has to match for a flow to be included. The whitelist and blacklist can both be specified. In such cases, the set of the returned flows is the set difference `whitelist - blacklist`. In other words, the result will contain all flows matched by the whitelist that are not also simultaneously matched by the blacklist. | | since | [google.protobuf.Timestamp](#google-protobuf-Timestamp) | | Since this time for returned flows. Incompatible with `number`. | | until | [google.protobuf.Timestamp](#google-protobuf-Timestamp) | | Until this time for returned flows. Incompatible with `number`. 
| +| experimental | [GetFlowsRequest.Experimental](#observer-GetFlowsRequest-Experimental) | | | + + + + + + + + +### GetFlowsRequest.Experimental +Experimental contains fields that are not stable yet. Support for +experimental features is always optional and subject to change. + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| field_mask | [google.protobuf.FieldMask](#google-protobuf-FieldMask) | | FieldMask allows clients to limit flow's fields that will be returned. For example, {paths: ["source.id", "destination.id"]} will return flows with only these two fields set. | @@ -165,6 +186,31 @@ GetFlowsResponse contains either a flow or a protocol message. + + +### GetNamespacesRequest + + + + + + + + + +### GetNamespacesResponse +GetNamespacesResponse contains the list of namespaces. + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| namespaces | [Namespace](#observer-Namespace) | repeated | Namespaces is a list of namespaces with flows | + + + + + + ### GetNodesRequest @@ -190,6 +236,22 @@ GetNodesResponse contains the list of nodes. + + +### Namespace + + + +| Field | Type | Label | Description | +| ----- | ---- | ----- | ----------- | +| cluster | [string](#string) | | | +| namespace | [string](#string) | | | + + + + + + ### Node @@ -279,6 +341,7 @@ to observe. | GetAgentEvents | [GetAgentEventsRequest](#observer-GetAgentEventsRequest) | [GetAgentEventsResponse](#observer-GetAgentEventsResponse) stream | GetAgentEvents returns Cilium agent events. | | GetDebugEvents | [GetDebugEventsRequest](#observer-GetDebugEventsRequest) | [GetDebugEventsResponse](#observer-GetDebugEventsResponse) stream | GetDebugEvents returns Cilium datapath debug events. | | GetNodes | [GetNodesRequest](#observer-GetNodesRequest) | [GetNodesResponse](#observer-GetNodesResponse) | GetNodes returns information about nodes in a cluster. | +| GetNamespaces | [GetNamespacesRequest](#observer-GetNamespacesRequest) | [GetNamespacesResponse](#observer-GetNamespacesResponse) | GetNamespaces returns information about namespaces in a cluster. The namespaces returned are namespaces which have had network flows in the last hour. The namespaces are returned sorted by cluster name and namespace in ascending order. | | ServerStatus | [ServerStatusRequest](#observer-ServerStatusRequest) | [ServerStatusResponse](#observer-ServerStatusResponse) | ServerStatus returns some details about the running hubble server. | diff --git a/vendor/github.com/cilium/cilium/api/v1/observer/observer.pb.go b/vendor/github.com/cilium/cilium/api/v1/observer/observer.pb.go index d0c4aa380..7671333e2 100644 --- a/vendor/github.com/cilium/cilium/api/v1/observer/observer.pb.go +++ b/vendor/github.com/cilium/cilium/api/v1/observer/observer.pb.go @@ -3,8 +3,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.0 -// protoc v3.20.1 +// protoc-gen-go v1.30.0 +// protoc v4.22.3 // source: observer/observer.proto package observer @@ -14,6 +14,7 @@ import ( relay "github.com/cilium/cilium/api/v1/relay" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" timestamppb "google.golang.org/protobuf/types/known/timestamppb" wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" reflect "reflect" @@ -39,6 +40,15 @@ const FlowType_SOCK = flow.FlowType_SOCK var FlowType_name = flow.FlowType_name var FlowType_value = flow.FlowType_value +type AuthType = flow.AuthType + +const AuthType_DISABLED = flow.AuthType_DISABLED +const AuthType_SPIRE = flow.AuthType_SPIRE +const AuthType_TEST_ALWAYS_FAIL = flow.AuthType_TEST_ALWAYS_FAIL + +var AuthType_name = flow.AuthType_name +var AuthType_value = flow.AuthType_value + type TraceObservationPoint = flow.TraceObservationPoint const TraceObservationPoint_UNKNOWN_POINT = flow.TraceObservationPoint_UNKNOWN_POINT @@ -156,6 +166,8 @@ const DropReason_AUTH_REQUIRED = flow.DropReason_AUTH_REQUIRED const DropReason_CT_NO_MAP_FOUND = flow.DropReason_CT_NO_MAP_FOUND const DropReason_SNAT_NO_MAP_FOUND = flow.DropReason_SNAT_NO_MAP_FOUND const DropReason_INVALID_CLUSTER_ID = flow.DropReason_INVALID_CLUSTER_ID +const DropReason_UNSUPPORTED_PROTOCOL_FOR_DSR_ENCAP = flow.DropReason_UNSUPPORTED_PROTOCOL_FOR_DSR_ENCAP +const DropReason_NO_EGRESS_GATEWAY = flow.DropReason_NO_EGRESS_GATEWAY var DropReason_name = flow.DropReason_name var DropReason_value = flow.DropReason_value @@ -546,7 +558,8 @@ type GetFlowsRequest struct { // Since this time for returned flows. Incompatible with `number`. Since *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=since,proto3" json:"since,omitempty"` // Until this time for returned flows. Incompatible with `number`. - Until *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=until,proto3" json:"until,omitempty"` + Until *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=until,proto3" json:"until,omitempty"` + Experimental *GetFlowsRequest_Experimental `protobuf:"bytes,999,opt,name=experimental,proto3" json:"experimental,omitempty"` } func (x *GetFlowsRequest) Reset() { @@ -630,6 +643,13 @@ func (x *GetFlowsRequest) GetUntil() *timestamppb.Timestamp { return nil } +func (x *GetFlowsRequest) GetExperimental() *GetFlowsRequest_Experimental { + if x != nil { + return x.Experimental + } + return nil +} + // GetFlowsResponse contains either a flow or a protocol message. 
type GetFlowsResponse struct { state protoimpl.MessageState @@ -1323,6 +1343,148 @@ func (x *TLS) GetServerName() string { return "" } +type GetNamespacesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetNamespacesRequest) Reset() { + *x = GetNamespacesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_observer_observer_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetNamespacesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetNamespacesRequest) ProtoMessage() {} + +func (x *GetNamespacesRequest) ProtoReflect() protoreflect.Message { + mi := &file_observer_observer_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetNamespacesRequest.ProtoReflect.Descriptor instead. +func (*GetNamespacesRequest) Descriptor() ([]byte, []int) { + return file_observer_observer_proto_rawDescGZIP(), []int{12} +} + +// GetNamespacesResponse contains the list of namespaces. +type GetNamespacesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Namespaces is a list of namespaces with flows + Namespaces []*Namespace `protobuf:"bytes,1,rep,name=namespaces,proto3" json:"namespaces,omitempty"` +} + +func (x *GetNamespacesResponse) Reset() { + *x = GetNamespacesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_observer_observer_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetNamespacesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetNamespacesResponse) ProtoMessage() {} + +func (x *GetNamespacesResponse) ProtoReflect() protoreflect.Message { + mi := &file_observer_observer_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetNamespacesResponse.ProtoReflect.Descriptor instead. 
+func (*GetNamespacesResponse) Descriptor() ([]byte, []int) { + return file_observer_observer_proto_rawDescGZIP(), []int{13} +} + +func (x *GetNamespacesResponse) GetNamespaces() []*Namespace { + if x != nil { + return x.Namespaces + } + return nil +} + +type Namespace struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Cluster string `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` +} + +func (x *Namespace) Reset() { + *x = Namespace{} + if protoimpl.UnsafeEnabled { + mi := &file_observer_observer_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Namespace) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Namespace) ProtoMessage() {} + +func (x *Namespace) ProtoReflect() protoreflect.Message { + mi := &file_observer_observer_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Namespace.ProtoReflect.Descriptor instead. +func (*Namespace) Descriptor() ([]byte, []int) { + return file_observer_observer_proto_rawDescGZIP(), []int{14} +} + +func (x *Namespace) GetCluster() string { + if x != nil { + return x.Cluster + } + return "" +} + +func (x *Namespace) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + // ExportEvent contains an event to be exported. Not to be used outside of the // exporter feature. type ExportEvent struct { @@ -1347,7 +1509,7 @@ type ExportEvent struct { func (x *ExportEvent) Reset() { *x = ExportEvent{} if protoimpl.UnsafeEnabled { - mi := &file_observer_observer_proto_msgTypes[12] + mi := &file_observer_observer_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1360,7 +1522,7 @@ func (x *ExportEvent) String() string { func (*ExportEvent) ProtoMessage() {} func (x *ExportEvent) ProtoReflect() protoreflect.Message { - mi := &file_observer_observer_proto_msgTypes[12] + mi := &file_observer_observer_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1373,7 +1535,7 @@ func (x *ExportEvent) ProtoReflect() protoreflect.Message { // Deprecated: Use ExportEvent.ProtoReflect.Descriptor instead. func (*ExportEvent) Descriptor() ([]byte, []int) { - return file_observer_observer_proto_rawDescGZIP(), []int{12} + return file_observer_observer_proto_rawDescGZIP(), []int{15} } func (m *ExportEvent) GetResponseTypes() isExportEvent_ResponseTypes { @@ -1473,6 +1635,58 @@ func (*ExportEvent_AgentEvent) isExportEvent_ResponseTypes() {} func (*ExportEvent_DebugEvent) isExportEvent_ResponseTypes() {} +// Experimental contains fields that are not stable yet. Support for +// experimental features is always optional and subject to change. +type GetFlowsRequest_Experimental struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // FieldMask allows clients to limit flow's fields that will be returned. + // For example, {paths: ["source.id", "destination.id"]} will return flows + // with only these two fields set. 
+ FieldMask *fieldmaskpb.FieldMask `protobuf:"bytes,1,opt,name=field_mask,json=fieldMask,proto3" json:"field_mask,omitempty"` +} + +func (x *GetFlowsRequest_Experimental) Reset() { + *x = GetFlowsRequest_Experimental{} + if protoimpl.UnsafeEnabled { + mi := &file_observer_observer_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetFlowsRequest_Experimental) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetFlowsRequest_Experimental) ProtoMessage() {} + +func (x *GetFlowsRequest_Experimental) ProtoReflect() protoreflect.Message { + mi := &file_observer_observer_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetFlowsRequest_Experimental.ProtoReflect.Descriptor instead. +func (*GetFlowsRequest_Experimental) Descriptor() ([]byte, []int) { + return file_observer_observer_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *GetFlowsRequest_Experimental) GetFieldMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.FieldMask + } + return nil +} + var File_observer_observer_proto protoreflect.FileDescriptor var file_observer_observer_proto_rawDesc = []byte{ @@ -1482,190 +1696,218 @@ var file_observer_observer_proto_rawDesc = []byte{ 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0f, 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x66, 0x6c, 0x6f, 0x77, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x11, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x2f, 0x72, 0x65, 0x6c, - 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, - 0xf3, 0x02, 0x0a, 0x14, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x75, 0x6d, 0x5f, - 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6e, 0x75, 0x6d, - 0x46, 0x6c, 0x6f, 0x77, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x6c, 0x6f, - 0x77, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x46, 0x6c, 0x6f, - 0x77, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x65, 0x6e, 0x5f, 0x66, 0x6c, 0x6f, 0x77, 0x73, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x73, 0x65, 0x65, 0x6e, 0x46, 0x6c, 0x6f, 0x77, - 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x4e, 0x73, 0x12, 0x4c, - 0x0a, 0x13, 0x6e, 0x75, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, - 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, - 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x11, 0x6e, 0x75, 0x6d, 0x43, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x50, 0x0a, 0x15, - 0x6e, 0x75, 0x6d, 0x5f, 0x75, 0x6e, 0x61, 0x76, 0x61, 
0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x5f, - 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, - 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x13, 0x6e, 0x75, 0x6d, 0x55, 0x6e, - 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x2b, - 0x0a, 0x11, 0x75, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x6f, - 0x64, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x75, 0x6e, 0x61, 0x76, 0x61, - 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa1, 0x02, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x46, 0x6c, 0x6f, - 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, - 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, - 0x72, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x69, 0x72, 0x73, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x05, 0x66, 0x69, 0x72, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x6c, 0x6c, 0x6f, - 0x77, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x66, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, 0x12, - 0x2e, 0x0a, 0x09, 0x62, 0x6c, 0x61, 0x63, 0x6b, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x05, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x46, 0x6c, 0x6f, 0x77, 0x46, 0x69, - 0x6c, 0x74, 0x65, 0x72, 0x52, 0x09, 0x62, 0x6c, 0x61, 0x63, 0x6b, 0x6c, 0x69, 0x73, 0x74, 0x12, - 0x2e, 0x0a, 0x09, 0x77, 0x68, 0x69, 0x74, 0x65, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x06, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x46, 0x6c, 0x6f, 0x77, 0x46, 0x69, - 0x6c, 0x74, 0x65, 0x72, 0x52, 0x09, 0x77, 0x68, 0x69, 0x74, 0x65, 0x6c, 0x69, 0x73, 0x74, 0x12, - 0x30, 0x0a, 0x05, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x05, 0x73, 0x69, 0x6e, 0x63, - 0x65, 0x12, 0x30, 0x0a, 0x05, 0x75, 0x6e, 0x74, 0x69, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x05, 0x75, 0x6e, - 0x74, 0x69, 0x6c, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x22, 0x84, 0x02, 0x0a, 0x10, 0x47, 0x65, - 0x74, 0x46, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x20, - 0x0a, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x66, - 0x6c, 0x6f, 0x77, 0x2e, 0x46, 0x6c, 0x6f, 0x77, 0x48, 0x00, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, - 0x12, 0x39, 0x0a, 0x0b, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x2e, 0x4e, 0x6f, - 0x64, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x48, 0x00, 0x52, - 0x0a, 0x6e, 0x6f, 0x64, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x32, 0x0a, 0x0b, 0x6c, - 0x6f, 0x73, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x0f, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x4c, 0x6f, 0x73, 0x74, 0x45, 
0x76, 0x65, 0x6e, - 0x74, 0x48, 0x00, 0x52, 0x0a, 0x6c, 0x6f, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, - 0x1c, 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0xe8, 0x07, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, - 0x04, 0x74, 0x69, 0x6d, 0x65, 0x18, 0xe9, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x42, 0x10, - 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, - 0x22, 0xc1, 0x01, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, - 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, - 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x69, 0x72, 0x73, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x05, 0x66, 0x69, 0x72, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x6c, 0x6c, - 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x66, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, - 0x12, 0x30, 0x0a, 0x05, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x05, 0x73, 0x69, 0x6e, - 0x63, 0x65, 0x12, 0x30, 0x0a, 0x05, 0x75, 0x6e, 0x74, 0x69, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x05, 0x75, - 0x6e, 0x74, 0x69, 0x6c, 0x22, 0x9a, 0x01, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x41, 0x67, 0x65, 0x6e, - 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x31, 0x0a, 0x0b, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x41, 0x67, 0x65, 0x6e, - 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x0a, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0xe8, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x2f, 0x0a, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x18, 0xe9, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x04, 0x74, 0x69, 0x6d, - 0x65, 0x22, 0xc1, 0x01, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x44, 0x65, 0x62, 0x75, 0x67, 0x45, 0x76, - 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6e, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x0f, 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x66, 0x6c, 0x6f, + 0x77, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x11, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x2f, 0x72, + 0x65, 0x6c, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x65, + 0x72, 
0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x22, 0xf3, 0x02, 0x0a, 0x14, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x75, + 0x6d, 0x5f, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6e, + 0x75, 0x6d, 0x46, 0x6c, 0x6f, 0x77, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x66, + 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x46, + 0x6c, 0x6f, 0x77, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x65, 0x6e, 0x5f, 0x66, 0x6c, 0x6f, + 0x77, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x73, 0x65, 0x65, 0x6e, 0x46, 0x6c, + 0x6f, 0x77, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6e, 0x73, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x4e, 0x73, + 0x12, 0x4c, 0x0a, 0x13, 0x6e, 0x75, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, + 0x64, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x11, 0x6e, 0x75, 0x6d, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x50, + 0x0a, 0x15, 0x6e, 0x75, 0x6d, 0x5f, 0x75, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, + 0x65, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x13, 0x6e, 0x75, 0x6d, + 0x55, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x73, + 0x12, 0x2b, 0x0a, 0x11, 0x75, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x5f, + 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x75, 0x6e, 0x61, + 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x18, 0x0a, + 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x03, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x46, + 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x69, 0x72, 0x73, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x69, 0x72, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x6c, - 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x66, 0x6f, 0x6c, 0x6c, 0x6f, - 0x77, 0x12, 0x30, 0x0a, 0x05, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, + 0x6c, 0x6f, 0x77, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x66, 0x6f, 0x6c, 0x6c, 0x6f, + 0x77, 0x12, 0x2e, 0x0a, 0x09, 0x62, 0x6c, 0x61, 0x63, 0x6b, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x05, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x46, 0x6c, 0x6f, 0x77, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x09, 0x62, 0x6c, 0x61, 0x63, 0x6b, 0x6c, 0x69, 0x73, + 0x74, 0x12, 0x2e, 0x0a, 0x09, 0x77, 0x68, 0x69, 0x74, 0x65, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x06, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 
0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x46, 0x6c, 0x6f, 0x77, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x09, 0x77, 0x68, 0x69, 0x74, 0x65, 0x6c, 0x69, 0x73, + 0x74, 0x12, 0x30, 0x0a, 0x05, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x05, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x12, 0x30, 0x0a, 0x05, 0x75, 0x6e, 0x74, 0x69, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x05, - 0x75, 0x6e, 0x74, 0x69, 0x6c, 0x22, 0x9a, 0x01, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x44, 0x65, 0x62, - 0x75, 0x67, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x31, 0x0a, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x65, 0x62, - 0x75, 0x67, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x0a, 0x64, 0x65, 0x62, 0x75, 0x67, 0x45, 0x76, - 0x65, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0xe8, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x4e, 0x61, 0x6d, - 0x65, 0x12, 0x2f, 0x0a, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x18, 0xe9, 0x07, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x04, 0x74, 0x69, - 0x6d, 0x65, 0x22, 0x11, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x38, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x05, 0x6e, 0x6f, 0x64, - 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x05, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x22, - 0x8d, 0x02, 0x0a, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, - 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, - 0x12, 0x26, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x10, 0x2e, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1f, 0x0a, 0x03, 0x74, 0x6c, 0x73, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2e, 0x54, 0x4c, 0x53, 0x52, 0x03, 0x74, 0x6c, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x70, 0x74, - 0x69, 0x6d, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x75, 0x70, - 0x74, 0x69, 0x6d, 0x65, 0x4e, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x75, 0x6d, 0x5f, 0x66, 0x6c, - 0x6f, 0x77, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6e, 0x75, 0x6d, 0x46, 0x6c, - 0x6f, 0x77, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x66, 
0x6c, 0x6f, 0x77, 0x73, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x46, 0x6c, 0x6f, 0x77, 0x73, - 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x65, 0x6e, 0x5f, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x09, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x73, 0x65, 0x65, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x73, 0x22, - 0x40, 0x0a, 0x03, 0x54, 0x4c, 0x53, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, - 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, - 0x65, 0x22, 0xe9, 0x02, 0x0a, 0x0b, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x12, 0x20, 0x0a, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x0a, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x46, 0x6c, 0x6f, 0x77, 0x48, 0x00, 0x52, 0x04, 0x66, - 0x6c, 0x6f, 0x77, 0x12, 0x39, 0x0a, 0x0b, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x72, 0x65, 0x6c, 0x61, 0x79, - 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x45, 0x76, 0x65, 0x6e, 0x74, - 0x48, 0x00, 0x52, 0x0a, 0x6e, 0x6f, 0x64, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x32, - 0x0a, 0x0b, 0x6c, 0x6f, 0x73, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x4c, 0x6f, 0x73, 0x74, 0x45, - 0x76, 0x65, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x6c, 0x6f, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x73, 0x12, 0x33, 0x0a, 0x0b, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x41, - 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x61, 0x67, 0x65, - 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x33, 0x0a, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, - 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x66, - 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x48, 0x00, - 0x52, 0x0a, 0x64, 0x65, 0x62, 0x75, 0x67, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09, + 0x75, 0x6e, 0x74, 0x69, 0x6c, 0x12, 0x4b, 0x0a, 0x0c, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, + 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x18, 0xe7, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x6f, + 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x6c, 0x6f, 0x77, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, + 0x6e, 0x74, 0x61, 0x6c, 0x52, 0x0c, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x6c, 0x1a, 0x49, 0x0a, 0x0c, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x6c, 0x12, 0x39, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, + 0x73, 0x6b, 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x4a, 0x04, 0x08, + 0x02, 0x10, 0x03, 0x22, 0x84, 0x02, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x46, 0x6c, 0x6f, 0x77, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x20, 0x0a, 0x04, 0x66, 0x6c, 0x6f, 0x77, + 
0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x46, 0x6c, + 0x6f, 0x77, 0x48, 0x00, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x39, 0x0a, 0x0b, 0x6e, 0x6f, + 0x64, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x16, 0x2e, 0x72, 0x65, 0x6c, 0x61, 0x79, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x6e, 0x6f, 0x64, 0x65, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x32, 0x0a, 0x0b, 0x6c, 0x6f, 0x73, 0x74, 0x5f, 0x65, 0x76, + 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x66, 0x6c, 0x6f, + 0x77, 0x2e, 0x4c, 0x6f, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x6c, + 0x6f, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x6f, 0x64, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0xe8, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6e, + 0x6f, 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x18, + 0xe9, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x52, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x42, 0x10, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x22, 0xc1, 0x01, 0x0a, 0x15, 0x47, + 0x65, 0x74, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, + 0x66, 0x69, 0x72, 0x73, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x69, 0x72, + 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x06, 0x66, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, 0x12, 0x30, 0x0a, 0x05, 0x73, 0x69, + 0x6e, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x05, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x12, 0x30, 0x0a, 0x05, + 0x75, 0x6e, 0x74, 0x69, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x05, 0x75, 0x6e, 0x74, 0x69, 0x6c, 0x22, 0x9a, + 0x01, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x0b, 0x61, 0x67, 0x65, + 0x6e, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, + 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, + 0x52, 0x0a, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0xe8, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x18, 0xe9, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 
0x52, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x42, 0x10, 0x0a, 0x0e, 0x72, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x32, 0x99, 0x03, - 0x0a, 0x08, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x45, 0x0a, 0x08, 0x47, 0x65, - 0x74, 0x46, 0x6c, 0x6f, 0x77, 0x73, 0x12, 0x19, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x1a, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74, - 0x46, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, - 0x01, 0x12, 0x57, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x73, 0x12, 0x1f, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x47, - 0x65, 0x74, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, - 0x47, 0x65, 0x74, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x57, 0x0a, 0x0e, 0x47, 0x65, - 0x74, 0x44, 0x65, 0x62, 0x75, 0x67, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x1f, 0x2e, 0x6f, - 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x65, 0x62, 0x75, 0x67, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, - 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x65, 0x62, 0x75, - 0x67, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x30, 0x01, 0x12, 0x43, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x12, - 0x19, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, - 0x64, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x6f, 0x62, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1d, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x2a, 0x5a, 0x28, 0x67, 0x69, 0x74, - 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x63, - 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6f, 0x62, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x02, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x22, 0xc1, 0x01, 0x0a, 0x15, + 0x47, 0x65, 0x74, 0x44, 0x65, 0x62, 0x75, 0x67, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x14, 0x0a, + 0x05, 0x66, 0x69, 0x72, 0x73, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x69, + 0x72, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x6c, 
0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x06, 0x66, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, 0x12, 0x30, 0x0a, 0x05, 0x73, + 0x69, 0x6e, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x05, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x12, 0x30, 0x0a, + 0x05, 0x75, 0x6e, 0x74, 0x69, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x05, 0x75, 0x6e, 0x74, 0x69, 0x6c, 0x22, + 0x9a, 0x01, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x44, 0x65, 0x62, 0x75, 0x67, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x0b, 0x64, 0x65, + 0x62, 0x75, 0x67, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x10, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x52, 0x0a, 0x64, 0x65, 0x62, 0x75, 0x67, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x1c, 0x0a, + 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0xe8, 0x07, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x04, 0x74, + 0x69, 0x6d, 0x65, 0x18, 0xe9, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x22, 0x11, 0x0a, 0x0f, + 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, + 0x38, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x05, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x4e, 0x6f, + 0x64, 0x65, 0x52, 0x05, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x22, 0x8d, 0x02, 0x0a, 0x04, 0x4e, 0x6f, + 0x64, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x26, 0x0a, 0x05, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x72, 0x65, 0x6c, 0x61, + 0x79, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, + 0x74, 0x65, 0x12, 0x1f, 0x0a, 0x03, 0x74, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0d, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x54, 0x4c, 0x53, 0x52, 0x03, + 0x74, 0x6c, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6e, 0x73, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x4e, 0x73, + 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x75, 0x6d, 0x5f, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x08, 0x6e, 0x75, 0x6d, 0x46, 0x6c, 0x6f, 0x77, 0x73, 0x12, 0x1b, 0x0a, + 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x08, 0x20, 0x01, 
0x28, 0x04, + 0x52, 0x08, 0x6d, 0x61, 0x78, 0x46, 0x6c, 0x6f, 0x77, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, + 0x65, 0x6e, 0x5f, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, + 0x73, 0x65, 0x65, 0x6e, 0x46, 0x6c, 0x6f, 0x77, 0x73, 0x22, 0x40, 0x0a, 0x03, 0x54, 0x4c, 0x53, + 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x16, 0x0a, 0x14, 0x47, + 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x22, 0x4c, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x0a, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x13, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x4e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x73, 0x22, 0x43, 0x0a, 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0xe9, 0x02, 0x0a, 0x0b, 0x45, 0x78, 0x70, 0x6f, 0x72, + 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x20, 0x0a, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x46, 0x6c, 0x6f, 0x77, + 0x48, 0x00, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x39, 0x0a, 0x0b, 0x6e, 0x6f, 0x64, 0x65, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, + 0x72, 0x65, 0x6c, 0x61, 0x79, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x6e, 0x6f, 0x64, 0x65, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x32, 0x0a, 0x0b, 0x6c, 0x6f, 0x73, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, + 0x4c, 0x6f, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x6c, 0x6f, 0x73, + 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x33, 0x0a, 0x0b, 0x61, 0x67, 0x65, 0x6e, 0x74, + 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x66, + 0x6c, 0x6f, 0x77, 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x48, 0x00, + 0x52, 0x0a, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x33, 0x0a, 0x0b, + 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x10, 0x2e, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x65, 0x62, 0x75, 0x67, 0x45, 0x76, + 0x65, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x64, 0x65, 0x62, 0x75, 0x67, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0xe8, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x2f, 0x0a, 
0x04, 0x74, 0x69, 0x6d, 0x65, 0x18, 0xe9, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x04, 0x74, 0x69, 0x6d, 0x65, + 0x42, 0x10, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x32, 0xed, 0x03, 0x0a, 0x08, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, + 0x45, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x46, 0x6c, 0x6f, 0x77, 0x73, 0x12, 0x19, 0x2e, 0x6f, 0x62, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x6c, 0x6f, 0x77, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x6c, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x57, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x41, 0x67, 0x65, + 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x1f, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x6f, 0x62, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x76, 0x65, + 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, + 0x57, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x44, 0x65, 0x62, 0x75, 0x67, 0x45, 0x76, 0x65, 0x6e, 0x74, + 0x73, 0x12, 0x1f, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74, + 0x44, 0x65, 0x62, 0x75, 0x67, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x47, 0x65, + 0x74, 0x44, 0x65, 0x62, 0x75, 0x67, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x43, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x4e, + 0x6f, 0x64, 0x65, 0x73, 0x12, 0x19, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, + 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x1a, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, + 0x64, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x52, 0x0a, + 0x0d, 0x47, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x1e, + 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, + 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x00, 0x12, 0x4f, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x1d, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1e, 0x2e, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x42, 0x2a, 0x5a, 0x28, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x63, 0x69, 0x6c, 0x69, 0x75, 
0x6d, 0x2f, 0x63, 0x69, 0x6c, 0x69, 0x75, 0x6d, 0x2f, 0x61, + 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x03, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1680,74 +1922,84 @@ func file_observer_observer_proto_rawDescGZIP() []byte { return file_observer_observer_proto_rawDescData } -var file_observer_observer_proto_msgTypes = make([]protoimpl.MessageInfo, 13) +var file_observer_observer_proto_msgTypes = make([]protoimpl.MessageInfo, 17) var file_observer_observer_proto_goTypes = []interface{}{ - (*ServerStatusRequest)(nil), // 0: observer.ServerStatusRequest - (*ServerStatusResponse)(nil), // 1: observer.ServerStatusResponse - (*GetFlowsRequest)(nil), // 2: observer.GetFlowsRequest - (*GetFlowsResponse)(nil), // 3: observer.GetFlowsResponse - (*GetAgentEventsRequest)(nil), // 4: observer.GetAgentEventsRequest - (*GetAgentEventsResponse)(nil), // 5: observer.GetAgentEventsResponse - (*GetDebugEventsRequest)(nil), // 6: observer.GetDebugEventsRequest - (*GetDebugEventsResponse)(nil), // 7: observer.GetDebugEventsResponse - (*GetNodesRequest)(nil), // 8: observer.GetNodesRequest - (*GetNodesResponse)(nil), // 9: observer.GetNodesResponse - (*Node)(nil), // 10: observer.Node - (*TLS)(nil), // 11: observer.TLS - (*ExportEvent)(nil), // 12: observer.ExportEvent - (*wrapperspb.UInt32Value)(nil), // 13: google.protobuf.UInt32Value - (*flow.FlowFilter)(nil), // 14: flow.FlowFilter - (*timestamppb.Timestamp)(nil), // 15: google.protobuf.Timestamp - (*flow.Flow)(nil), // 16: flow.Flow - (*relay.NodeStatusEvent)(nil), // 17: relay.NodeStatusEvent - (*flow.LostEvent)(nil), // 18: flow.LostEvent - (*flow.AgentEvent)(nil), // 19: flow.AgentEvent - (*flow.DebugEvent)(nil), // 20: flow.DebugEvent - (relay.NodeState)(0), // 21: relay.NodeState + (*ServerStatusRequest)(nil), // 0: observer.ServerStatusRequest + (*ServerStatusResponse)(nil), // 1: observer.ServerStatusResponse + (*GetFlowsRequest)(nil), // 2: observer.GetFlowsRequest + (*GetFlowsResponse)(nil), // 3: observer.GetFlowsResponse + (*GetAgentEventsRequest)(nil), // 4: observer.GetAgentEventsRequest + (*GetAgentEventsResponse)(nil), // 5: observer.GetAgentEventsResponse + (*GetDebugEventsRequest)(nil), // 6: observer.GetDebugEventsRequest + (*GetDebugEventsResponse)(nil), // 7: observer.GetDebugEventsResponse + (*GetNodesRequest)(nil), // 8: observer.GetNodesRequest + (*GetNodesResponse)(nil), // 9: observer.GetNodesResponse + (*Node)(nil), // 10: observer.Node + (*TLS)(nil), // 11: observer.TLS + (*GetNamespacesRequest)(nil), // 12: observer.GetNamespacesRequest + (*GetNamespacesResponse)(nil), // 13: observer.GetNamespacesResponse + (*Namespace)(nil), // 14: observer.Namespace + (*ExportEvent)(nil), // 15: observer.ExportEvent + (*GetFlowsRequest_Experimental)(nil), // 16: observer.GetFlowsRequest.Experimental + (*wrapperspb.UInt32Value)(nil), // 17: google.protobuf.UInt32Value + (*flow.FlowFilter)(nil), // 18: flow.FlowFilter + (*timestamppb.Timestamp)(nil), // 19: google.protobuf.Timestamp + (*flow.Flow)(nil), // 20: flow.Flow + (*relay.NodeStatusEvent)(nil), // 21: relay.NodeStatusEvent + (*flow.LostEvent)(nil), // 22: flow.LostEvent + (*flow.AgentEvent)(nil), // 23: flow.AgentEvent + (*flow.DebugEvent)(nil), // 24: flow.DebugEvent + (relay.NodeState)(0), // 25: relay.NodeState + (*fieldmaskpb.FieldMask)(nil), // 26: google.protobuf.FieldMask } var file_observer_observer_proto_depIdxs = []int32{ - 13, // 0: observer.ServerStatusResponse.num_connected_nodes:type_name -> 
google.protobuf.UInt32Value - 13, // 1: observer.ServerStatusResponse.num_unavailable_nodes:type_name -> google.protobuf.UInt32Value - 14, // 2: observer.GetFlowsRequest.blacklist:type_name -> flow.FlowFilter - 14, // 3: observer.GetFlowsRequest.whitelist:type_name -> flow.FlowFilter - 15, // 4: observer.GetFlowsRequest.since:type_name -> google.protobuf.Timestamp - 15, // 5: observer.GetFlowsRequest.until:type_name -> google.protobuf.Timestamp - 16, // 6: observer.GetFlowsResponse.flow:type_name -> flow.Flow - 17, // 7: observer.GetFlowsResponse.node_status:type_name -> relay.NodeStatusEvent - 18, // 8: observer.GetFlowsResponse.lost_events:type_name -> flow.LostEvent - 15, // 9: observer.GetFlowsResponse.time:type_name -> google.protobuf.Timestamp - 15, // 10: observer.GetAgentEventsRequest.since:type_name -> google.protobuf.Timestamp - 15, // 11: observer.GetAgentEventsRequest.until:type_name -> google.protobuf.Timestamp - 19, // 12: observer.GetAgentEventsResponse.agent_event:type_name -> flow.AgentEvent - 15, // 13: observer.GetAgentEventsResponse.time:type_name -> google.protobuf.Timestamp - 15, // 14: observer.GetDebugEventsRequest.since:type_name -> google.protobuf.Timestamp - 15, // 15: observer.GetDebugEventsRequest.until:type_name -> google.protobuf.Timestamp - 20, // 16: observer.GetDebugEventsResponse.debug_event:type_name -> flow.DebugEvent - 15, // 17: observer.GetDebugEventsResponse.time:type_name -> google.protobuf.Timestamp - 10, // 18: observer.GetNodesResponse.nodes:type_name -> observer.Node - 21, // 19: observer.Node.state:type_name -> relay.NodeState - 11, // 20: observer.Node.tls:type_name -> observer.TLS - 16, // 21: observer.ExportEvent.flow:type_name -> flow.Flow - 17, // 22: observer.ExportEvent.node_status:type_name -> relay.NodeStatusEvent - 18, // 23: observer.ExportEvent.lost_events:type_name -> flow.LostEvent - 19, // 24: observer.ExportEvent.agent_event:type_name -> flow.AgentEvent - 20, // 25: observer.ExportEvent.debug_event:type_name -> flow.DebugEvent - 15, // 26: observer.ExportEvent.time:type_name -> google.protobuf.Timestamp - 2, // 27: observer.Observer.GetFlows:input_type -> observer.GetFlowsRequest - 4, // 28: observer.Observer.GetAgentEvents:input_type -> observer.GetAgentEventsRequest - 6, // 29: observer.Observer.GetDebugEvents:input_type -> observer.GetDebugEventsRequest - 8, // 30: observer.Observer.GetNodes:input_type -> observer.GetNodesRequest - 0, // 31: observer.Observer.ServerStatus:input_type -> observer.ServerStatusRequest - 3, // 32: observer.Observer.GetFlows:output_type -> observer.GetFlowsResponse - 5, // 33: observer.Observer.GetAgentEvents:output_type -> observer.GetAgentEventsResponse - 7, // 34: observer.Observer.GetDebugEvents:output_type -> observer.GetDebugEventsResponse - 9, // 35: observer.Observer.GetNodes:output_type -> observer.GetNodesResponse - 1, // 36: observer.Observer.ServerStatus:output_type -> observer.ServerStatusResponse - 32, // [32:37] is the sub-list for method output_type - 27, // [27:32] is the sub-list for method input_type - 27, // [27:27] is the sub-list for extension type_name - 27, // [27:27] is the sub-list for extension extendee - 0, // [0:27] is the sub-list for field type_name + 17, // 0: observer.ServerStatusResponse.num_connected_nodes:type_name -> google.protobuf.UInt32Value + 17, // 1: observer.ServerStatusResponse.num_unavailable_nodes:type_name -> google.protobuf.UInt32Value + 18, // 2: observer.GetFlowsRequest.blacklist:type_name -> flow.FlowFilter + 18, // 3: 
observer.GetFlowsRequest.whitelist:type_name -> flow.FlowFilter + 19, // 4: observer.GetFlowsRequest.since:type_name -> google.protobuf.Timestamp + 19, // 5: observer.GetFlowsRequest.until:type_name -> google.protobuf.Timestamp + 16, // 6: observer.GetFlowsRequest.experimental:type_name -> observer.GetFlowsRequest.Experimental + 20, // 7: observer.GetFlowsResponse.flow:type_name -> flow.Flow + 21, // 8: observer.GetFlowsResponse.node_status:type_name -> relay.NodeStatusEvent + 22, // 9: observer.GetFlowsResponse.lost_events:type_name -> flow.LostEvent + 19, // 10: observer.GetFlowsResponse.time:type_name -> google.protobuf.Timestamp + 19, // 11: observer.GetAgentEventsRequest.since:type_name -> google.protobuf.Timestamp + 19, // 12: observer.GetAgentEventsRequest.until:type_name -> google.protobuf.Timestamp + 23, // 13: observer.GetAgentEventsResponse.agent_event:type_name -> flow.AgentEvent + 19, // 14: observer.GetAgentEventsResponse.time:type_name -> google.protobuf.Timestamp + 19, // 15: observer.GetDebugEventsRequest.since:type_name -> google.protobuf.Timestamp + 19, // 16: observer.GetDebugEventsRequest.until:type_name -> google.protobuf.Timestamp + 24, // 17: observer.GetDebugEventsResponse.debug_event:type_name -> flow.DebugEvent + 19, // 18: observer.GetDebugEventsResponse.time:type_name -> google.protobuf.Timestamp + 10, // 19: observer.GetNodesResponse.nodes:type_name -> observer.Node + 25, // 20: observer.Node.state:type_name -> relay.NodeState + 11, // 21: observer.Node.tls:type_name -> observer.TLS + 14, // 22: observer.GetNamespacesResponse.namespaces:type_name -> observer.Namespace + 20, // 23: observer.ExportEvent.flow:type_name -> flow.Flow + 21, // 24: observer.ExportEvent.node_status:type_name -> relay.NodeStatusEvent + 22, // 25: observer.ExportEvent.lost_events:type_name -> flow.LostEvent + 23, // 26: observer.ExportEvent.agent_event:type_name -> flow.AgentEvent + 24, // 27: observer.ExportEvent.debug_event:type_name -> flow.DebugEvent + 19, // 28: observer.ExportEvent.time:type_name -> google.protobuf.Timestamp + 26, // 29: observer.GetFlowsRequest.Experimental.field_mask:type_name -> google.protobuf.FieldMask + 2, // 30: observer.Observer.GetFlows:input_type -> observer.GetFlowsRequest + 4, // 31: observer.Observer.GetAgentEvents:input_type -> observer.GetAgentEventsRequest + 6, // 32: observer.Observer.GetDebugEvents:input_type -> observer.GetDebugEventsRequest + 8, // 33: observer.Observer.GetNodes:input_type -> observer.GetNodesRequest + 12, // 34: observer.Observer.GetNamespaces:input_type -> observer.GetNamespacesRequest + 0, // 35: observer.Observer.ServerStatus:input_type -> observer.ServerStatusRequest + 3, // 36: observer.Observer.GetFlows:output_type -> observer.GetFlowsResponse + 5, // 37: observer.Observer.GetAgentEvents:output_type -> observer.GetAgentEventsResponse + 7, // 38: observer.Observer.GetDebugEvents:output_type -> observer.GetDebugEventsResponse + 9, // 39: observer.Observer.GetNodes:output_type -> observer.GetNodesResponse + 13, // 40: observer.Observer.GetNamespaces:output_type -> observer.GetNamespacesResponse + 1, // 41: observer.Observer.ServerStatus:output_type -> observer.ServerStatusResponse + 36, // [36:42] is the sub-list for method output_type + 30, // [30:36] is the sub-list for method input_type + 30, // [30:30] is the sub-list for extension type_name + 30, // [30:30] is the sub-list for extension extendee + 0, // [0:30] is the sub-list for field type_name } func init() { file_observer_observer_proto_init() } @@ -1901,6 +2153,42 
@@ func file_observer_observer_proto_init() { } } file_observer_observer_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetNamespacesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_observer_observer_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetNamespacesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_observer_observer_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Namespace); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_observer_observer_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ExportEvent); i { case 0: return &v.state @@ -1912,13 +2200,25 @@ func file_observer_observer_proto_init() { return nil } } + file_observer_observer_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetFlowsRequest_Experimental); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } file_observer_observer_proto_msgTypes[3].OneofWrappers = []interface{}{ (*GetFlowsResponse_Flow)(nil), (*GetFlowsResponse_NodeStatus)(nil), (*GetFlowsResponse_LostEvents)(nil), } - file_observer_observer_proto_msgTypes[12].OneofWrappers = []interface{}{ + file_observer_observer_proto_msgTypes[15].OneofWrappers = []interface{}{ (*ExportEvent_Flow)(nil), (*ExportEvent_NodeStatus)(nil), (*ExportEvent_LostEvents)(nil), @@ -1931,7 +2231,7 @@ func file_observer_observer_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_observer_observer_proto_rawDesc, NumEnums: 0, - NumMessages: 13, + NumMessages: 17, NumExtensions: 0, NumServices: 1, }, diff --git a/vendor/github.com/cilium/cilium/api/v1/observer/observer.pb.json.go b/vendor/github.com/cilium/cilium/api/v1/observer/observer.pb.json.go index a305ccd03..cf61f823c 100644 --- a/vendor/github.com/cilium/cilium/api/v1/observer/observer.pb.json.go +++ b/vendor/github.com/cilium/cilium/api/v1/observer/observer.pb.json.go @@ -55,6 +55,22 @@ func (msg *GetFlowsRequest) UnmarshalJSON(b []byte) error { }.Unmarshal(b, msg) } +// MarshalJSON implements json.Marshaler +func (msg *GetFlowsRequest_Experimental) MarshalJSON() ([]byte, error) { + return protojson.MarshalOptions{ + UseEnumNumbers: false, + EmitUnpopulated: false, + UseProtoNames: true, + }.Marshal(msg) +} + +// UnmarshalJSON implements json.Unmarshaler +func (msg *GetFlowsRequest_Experimental) UnmarshalJSON(b []byte) error { + return protojson.UnmarshalOptions{ + DiscardUnknown: false, + }.Unmarshal(b, msg) +} + // MarshalJSON implements json.Marshaler func (msg *GetFlowsResponse) MarshalJSON() ([]byte, error) { return protojson.MarshalOptions{ @@ -199,6 +215,54 @@ func (msg *TLS) UnmarshalJSON(b []byte) error { }.Unmarshal(b, msg) } +// MarshalJSON implements json.Marshaler +func (msg *GetNamespacesRequest) MarshalJSON() ([]byte, error) { + return protojson.MarshalOptions{ + UseEnumNumbers: false, + EmitUnpopulated: false, + UseProtoNames: true, + }.Marshal(msg) +} + +// UnmarshalJSON implements json.Unmarshaler +func (msg *GetNamespacesRequest) UnmarshalJSON(b []byte) error { + return protojson.UnmarshalOptions{ + DiscardUnknown: 
false, + }.Unmarshal(b, msg) +} + +// MarshalJSON implements json.Marshaler +func (msg *GetNamespacesResponse) MarshalJSON() ([]byte, error) { + return protojson.MarshalOptions{ + UseEnumNumbers: false, + EmitUnpopulated: false, + UseProtoNames: true, + }.Marshal(msg) +} + +// UnmarshalJSON implements json.Unmarshaler +func (msg *GetNamespacesResponse) UnmarshalJSON(b []byte) error { + return protojson.UnmarshalOptions{ + DiscardUnknown: false, + }.Unmarshal(b, msg) +} + +// MarshalJSON implements json.Marshaler +func (msg *Namespace) MarshalJSON() ([]byte, error) { + return protojson.MarshalOptions{ + UseEnumNumbers: false, + EmitUnpopulated: false, + UseProtoNames: true, + }.Marshal(msg) +} + +// UnmarshalJSON implements json.Unmarshaler +func (msg *Namespace) UnmarshalJSON(b []byte) error { + return protojson.UnmarshalOptions{ + DiscardUnknown: false, + }.Unmarshal(b, msg) +} + // MarshalJSON implements json.Marshaler func (msg *ExportEvent) MarshalJSON() ([]byte, error) { return protojson.MarshalOptions{ diff --git a/vendor/github.com/cilium/cilium/api/v1/observer/observer.proto b/vendor/github.com/cilium/cilium/api/v1/observer/observer.proto index e514f84c8..f6844e7bf 100644 --- a/vendor/github.com/cilium/cilium/api/v1/observer/observer.proto +++ b/vendor/github.com/cilium/cilium/api/v1/observer/observer.proto @@ -5,6 +5,7 @@ syntax = "proto3"; import "google/protobuf/wrappers.proto"; import "google/protobuf/timestamp.proto"; +import "google/protobuf/field_mask.proto"; import public "flow/flow.proto"; import "relay/relay.proto"; @@ -27,6 +28,12 @@ service Observer { // GetNodes returns information about nodes in a cluster. rpc GetNodes(GetNodesRequest) returns (GetNodesResponse) {} + // GetNamespaces returns information about namespaces in a cluster. + // The namespaces returned are namespaces which have had network flows in + // the last hour. The namespaces are returned sorted by cluster name and + // namespace in ascending order. + rpc GetNamespaces(GetNamespacesRequest) returns (GetNamespacesResponse) {} + // ServerStatus returns some details about the running hubble server. rpc ServerStatus(ServerStatusRequest) returns (ServerStatusResponse) {} } @@ -105,6 +112,16 @@ message GetFlowsRequest { // Until this time for returned flows. Incompatible with `number`. google.protobuf.Timestamp until = 8; + + // Experimental contains fields that are not stable yet. Support for + // experimental features is always optional and subject to change. + message Experimental { + // FieldMask allows clients to limit flow's fields that will be returned. + // For example, {paths: ["source.id", "destination.id"]} will return flows + // with only these two fields set. + google.protobuf.FieldMask field_mask = 1; + } + Experimental experimental = 999; } // GetFlowsResponse contains either a flow or a protocol message. @@ -236,6 +253,19 @@ message TLS { string server_name = 2; } +message GetNamespacesRequest {} + +// GetNamespacesResponse contains the list of namespaces. +message GetNamespacesResponse { + // Namespaces is a list of namespaces with flows + repeated Namespace namespaces = 1; +} + +message Namespace { + string cluster = 1; + string namespace = 2; +} + // ExportEvent contains an event to be exported. Not to be used outside of the // exporter feature. 
message ExportEvent { diff --git a/vendor/github.com/cilium/cilium/api/v1/observer/observer_grpc.pb.go b/vendor/github.com/cilium/cilium/api/v1/observer/observer_grpc.pb.go index 5500f0da2..198b72937 100644 --- a/vendor/github.com/cilium/cilium/api/v1/observer/observer_grpc.pb.go +++ b/vendor/github.com/cilium/cilium/api/v1/observer/observer_grpc.pb.go @@ -1,7 +1,10 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Hubble + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.20.1 +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.3 // source: observer/observer.proto package observer @@ -18,6 +21,15 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + Observer_GetFlows_FullMethodName = "/observer.Observer/GetFlows" + Observer_GetAgentEvents_FullMethodName = "/observer.Observer/GetAgentEvents" + Observer_GetDebugEvents_FullMethodName = "/observer.Observer/GetDebugEvents" + Observer_GetNodes_FullMethodName = "/observer.Observer/GetNodes" + Observer_GetNamespaces_FullMethodName = "/observer.Observer/GetNamespaces" + Observer_ServerStatus_FullMethodName = "/observer.Observer/ServerStatus" +) + // ObserverClient is the client API for Observer service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -30,6 +42,11 @@ type ObserverClient interface { GetDebugEvents(ctx context.Context, in *GetDebugEventsRequest, opts ...grpc.CallOption) (Observer_GetDebugEventsClient, error) // GetNodes returns information about nodes in a cluster. GetNodes(ctx context.Context, in *GetNodesRequest, opts ...grpc.CallOption) (*GetNodesResponse, error) + // GetNamespaces returns information about namespaces in a cluster. + // The namespaces returned are namespaces which have had network flows in + // the last hour. The namespaces are returned sorted by cluster name and + // namespace in ascending order. + GetNamespaces(ctx context.Context, in *GetNamespacesRequest, opts ...grpc.CallOption) (*GetNamespacesResponse, error) // ServerStatus returns some details about the running hubble server. ServerStatus(ctx context.Context, in *ServerStatusRequest, opts ...grpc.CallOption) (*ServerStatusResponse, error) } @@ -43,7 +60,7 @@ func NewObserverClient(cc grpc.ClientConnInterface) ObserverClient { } func (c *observerClient) GetFlows(ctx context.Context, in *GetFlowsRequest, opts ...grpc.CallOption) (Observer_GetFlowsClient, error) { - stream, err := c.cc.NewStream(ctx, &Observer_ServiceDesc.Streams[0], "/observer.Observer/GetFlows", opts...) + stream, err := c.cc.NewStream(ctx, &Observer_ServiceDesc.Streams[0], Observer_GetFlows_FullMethodName, opts...) if err != nil { return nil, err } @@ -75,7 +92,7 @@ func (x *observerGetFlowsClient) Recv() (*GetFlowsResponse, error) { } func (c *observerClient) GetAgentEvents(ctx context.Context, in *GetAgentEventsRequest, opts ...grpc.CallOption) (Observer_GetAgentEventsClient, error) { - stream, err := c.cc.NewStream(ctx, &Observer_ServiceDesc.Streams[1], "/observer.Observer/GetAgentEvents", opts...) + stream, err := c.cc.NewStream(ctx, &Observer_ServiceDesc.Streams[1], Observer_GetAgentEvents_FullMethodName, opts...) 
if err != nil { return nil, err } @@ -107,7 +124,7 @@ func (x *observerGetAgentEventsClient) Recv() (*GetAgentEventsResponse, error) { } func (c *observerClient) GetDebugEvents(ctx context.Context, in *GetDebugEventsRequest, opts ...grpc.CallOption) (Observer_GetDebugEventsClient, error) { - stream, err := c.cc.NewStream(ctx, &Observer_ServiceDesc.Streams[2], "/observer.Observer/GetDebugEvents", opts...) + stream, err := c.cc.NewStream(ctx, &Observer_ServiceDesc.Streams[2], Observer_GetDebugEvents_FullMethodName, opts...) if err != nil { return nil, err } @@ -140,7 +157,16 @@ func (x *observerGetDebugEventsClient) Recv() (*GetDebugEventsResponse, error) { func (c *observerClient) GetNodes(ctx context.Context, in *GetNodesRequest, opts ...grpc.CallOption) (*GetNodesResponse, error) { out := new(GetNodesResponse) - err := c.cc.Invoke(ctx, "/observer.Observer/GetNodes", in, out, opts...) + err := c.cc.Invoke(ctx, Observer_GetNodes_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *observerClient) GetNamespaces(ctx context.Context, in *GetNamespacesRequest, opts ...grpc.CallOption) (*GetNamespacesResponse, error) { + out := new(GetNamespacesResponse) + err := c.cc.Invoke(ctx, Observer_GetNamespaces_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -149,7 +175,7 @@ func (c *observerClient) GetNodes(ctx context.Context, in *GetNodesRequest, opts func (c *observerClient) ServerStatus(ctx context.Context, in *ServerStatusRequest, opts ...grpc.CallOption) (*ServerStatusResponse, error) { out := new(ServerStatusResponse) - err := c.cc.Invoke(ctx, "/observer.Observer/ServerStatus", in, out, opts...) + err := c.cc.Invoke(ctx, Observer_ServerStatus_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -168,6 +194,11 @@ type ObserverServer interface { GetDebugEvents(*GetDebugEventsRequest, Observer_GetDebugEventsServer) error // GetNodes returns information about nodes in a cluster. GetNodes(context.Context, *GetNodesRequest) (*GetNodesResponse, error) + // GetNamespaces returns information about namespaces in a cluster. + // The namespaces returned are namespaces which have had network flows in + // the last hour. The namespaces are returned sorted by cluster name and + // namespace in ascending order. + GetNamespaces(context.Context, *GetNamespacesRequest) (*GetNamespacesResponse, error) // ServerStatus returns some details about the running hubble server. 
ServerStatus(context.Context, *ServerStatusRequest) (*ServerStatusResponse, error) } @@ -188,6 +219,9 @@ func (UnimplementedObserverServer) GetDebugEvents(*GetDebugEventsRequest, Observ func (UnimplementedObserverServer) GetNodes(context.Context, *GetNodesRequest) (*GetNodesResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetNodes not implemented") } +func (UnimplementedObserverServer) GetNamespaces(context.Context, *GetNamespacesRequest) (*GetNamespacesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetNamespaces not implemented") +} func (UnimplementedObserverServer) ServerStatus(context.Context, *ServerStatusRequest) (*ServerStatusResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ServerStatus not implemented") } @@ -276,7 +310,7 @@ func _Observer_GetNodes_Handler(srv interface{}, ctx context.Context, dec func(i } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/observer.Observer/GetNodes", + FullMethod: Observer_GetNodes_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ObserverServer).GetNodes(ctx, req.(*GetNodesRequest)) @@ -284,6 +318,24 @@ func _Observer_GetNodes_Handler(srv interface{}, ctx context.Context, dec func(i return interceptor(ctx, in, info, handler) } +func _Observer_GetNamespaces_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetNamespacesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ObserverServer).GetNamespaces(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Observer_GetNamespaces_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ObserverServer).GetNamespaces(ctx, req.(*GetNamespacesRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Observer_ServerStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ServerStatusRequest) if err := dec(in); err != nil { @@ -294,7 +346,7 @@ func _Observer_ServerStatus_Handler(srv interface{}, ctx context.Context, dec fu } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/observer.Observer/ServerStatus", + FullMethod: Observer_ServerStatus_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ObserverServer).ServerStatus(ctx, req.(*ServerStatusRequest)) @@ -313,6 +365,10 @@ var Observer_ServiceDesc = grpc.ServiceDesc{ MethodName: "GetNodes", Handler: _Observer_GetNodes_Handler, }, + { + MethodName: "GetNamespaces", + Handler: _Observer_GetNamespaces_Handler, + }, { MethodName: "ServerStatus", Handler: _Observer_ServerStatus_Handler, diff --git a/vendor/github.com/cilium/cilium/api/v1/peer/peer.pb.go b/vendor/github.com/cilium/cilium/api/v1/peer/peer.pb.go index 23e541dfe..a85ed2b9a 100644 --- a/vendor/github.com/cilium/cilium/api/v1/peer/peer.pb.go +++ b/vendor/github.com/cilium/cilium/api/v1/peer/peer.pb.go @@ -3,8 +3,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.0 -// protoc v3.20.1 +// protoc-gen-go v1.30.0 +// protoc v4.22.3 // source: peer/peer.proto package peer diff --git a/vendor/github.com/cilium/cilium/api/v1/peer/peer_grpc.pb.go b/vendor/github.com/cilium/cilium/api/v1/peer/peer_grpc.pb.go index a03830a30..4307d6c72 100644 --- a/vendor/github.com/cilium/cilium/api/v1/peer/peer_grpc.pb.go +++ b/vendor/github.com/cilium/cilium/api/v1/peer/peer_grpc.pb.go @@ -1,7 +1,10 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Hubble + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.20.1 +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.3 // source: peer/peer.proto package peer @@ -18,6 +21,10 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + Peer_Notify_FullMethodName = "/peer.Peer/Notify" +) + // PeerClient is the client API for Peer service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -38,7 +45,7 @@ func NewPeerClient(cc grpc.ClientConnInterface) PeerClient { } func (c *peerClient) Notify(ctx context.Context, in *NotifyRequest, opts ...grpc.CallOption) (Peer_NotifyClient, error) { - stream, err := c.cc.NewStream(ctx, &Peer_ServiceDesc.Streams[0], "/peer.Peer/Notify", opts...) + stream, err := c.cc.NewStream(ctx, &Peer_ServiceDesc.Streams[0], Peer_Notify_FullMethodName, opts...) if err != nil { return nil, err } diff --git a/vendor/github.com/cilium/cilium/api/v1/recorder/recorder.pb.go b/vendor/github.com/cilium/cilium/api/v1/recorder/recorder.pb.go index 0fddeeee7..14ef43820 100644 --- a/vendor/github.com/cilium/cilium/api/v1/recorder/recorder.pb.go +++ b/vendor/github.com/cilium/cilium/api/v1/recorder/recorder.pb.go @@ -3,8 +3,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.20.1 +// protoc-gen-go v1.30.0 +// protoc v4.22.3 // source: recorder/recorder.proto package recorder diff --git a/vendor/github.com/cilium/cilium/api/v1/recorder/recorder_grpc.pb.go b/vendor/github.com/cilium/cilium/api/v1/recorder/recorder_grpc.pb.go index cc0439e8d..1e026d533 100644 --- a/vendor/github.com/cilium/cilium/api/v1/recorder/recorder_grpc.pb.go +++ b/vendor/github.com/cilium/cilium/api/v1/recorder/recorder_grpc.pb.go @@ -1,7 +1,10 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Hubble + // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.20.1 +// - protoc-gen-go-grpc v1.3.0 +// - protoc v4.22.3 // source: recorder/recorder.proto package recorder @@ -18,6 +21,10 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + Recorder_Record_FullMethodName = "/recorder.Recorder/Record" +) + // RecorderClient is the client API for Recorder service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -36,7 +43,7 @@ func NewRecorderClient(cc grpc.ClientConnInterface) RecorderClient { } func (c *recorderClient) Record(ctx context.Context, opts ...grpc.CallOption) (Recorder_RecordClient, error) { - stream, err := c.cc.NewStream(ctx, &Recorder_ServiceDesc.Streams[0], "/recorder.Recorder/Record", opts...) 
+ stream, err := c.cc.NewStream(ctx, &Recorder_ServiceDesc.Streams[0], Recorder_Record_FullMethodName, opts...) if err != nil { return nil, err } diff --git a/vendor/github.com/cilium/cilium/api/v1/relay/relay.pb.go b/vendor/github.com/cilium/cilium/api/v1/relay/relay.pb.go index 35cc8c9f0..1f40c6c30 100644 --- a/vendor/github.com/cilium/cilium/api/v1/relay/relay.pb.go +++ b/vendor/github.com/cilium/cilium/api/v1/relay/relay.pb.go @@ -3,8 +3,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.20.1 +// protoc-gen-go v1.30.0 +// protoc v4.22.3 // source: relay/relay.proto package relay diff --git a/vendor/github.com/cilium/cilium/pkg/api/apierror.go b/vendor/github.com/cilium/cilium/pkg/api/apierror.go deleted file mode 100644 index 83c44eeba..000000000 --- a/vendor/github.com/cilium/cilium/pkg/api/apierror.go +++ /dev/null @@ -1,61 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package api - -import ( - "fmt" - "net/http" - - "github.com/go-openapi/runtime" - - "github.com/cilium/cilium/api/v1/models" -) - -// APIError is the error representation for the API. -type APIError struct { - code int - msg string -} - -// New creates a API error from the code, msg and extra arguments. -func New(code int, msg string, args ...interface{}) *APIError { - if code <= 0 { - code = 500 - } - - if len(args) > 0 { - return &APIError{code: code, msg: fmt.Sprintf(msg, args...)} - } - return &APIError{code: code, msg: msg} -} - -// Error creates a new API error from the code and error. -func Error(code int, err error) *APIError { - if err == nil { - err = fmt.Errorf("Error pointer was nil") - } - - return New(code, err.Error()) -} - -// Error returns the API error message. -func (a *APIError) Error() string { - return a.msg -} - -// GetModel returns model error. -func (a *APIError) GetModel() *models.Error { - m := models.Error(a.msg) - return &m -} - -// WriteResponse to the client. -func (a *APIError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { - rw.WriteHeader(a.code) - m := a.GetModel() - if err := producer.Produce(rw, m); err != nil { - panic(err) - } - -} diff --git a/vendor/github.com/cilium/cilium/pkg/api/apipanic.go b/vendor/github.com/cilium/cilium/pkg/api/apipanic.go deleted file mode 100644 index 38481b26d..000000000 --- a/vendor/github.com/cilium/cilium/pkg/api/apipanic.go +++ /dev/null @@ -1,43 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package api - -import ( - "net/http" - "os" - "runtime/debug" - - "github.com/sirupsen/logrus" - - "github.com/cilium/cilium/pkg/logging" -) - -// APIPanicHandler recovers from API panics and logs encountered panics -type APIPanicHandler struct { - Next http.Handler -} - -// ServeHTTP implements the http.Handler interface. 
-// It recovers from panics of all next handlers and logs them -func (h *APIPanicHandler) ServeHTTP(wr http.ResponseWriter, req *http.Request) { - defer func() { - if r := recover(); r != nil { - fields := logrus.Fields{ - "panic_message": r, - "url": req.URL.String(), - "method": req.Method, - "client": req.RemoteAddr, - } - log.WithFields(fields).Warn("Cilium API handler panicked") - if logging.DefaultLogger.IsLevelEnabled(logrus.DebugLevel) { - os.Stdout.Write(debug.Stack()) - } - wr.WriteHeader(http.StatusInternalServerError) - if _, err := wr.Write([]byte("Internal error occurred, check Cilium logs for details.")); err != nil { - log.WithError(err).Debug("Failed to write API response") - } - } - }() - h.Next.ServeHTTP(wr, req) -} diff --git a/vendor/github.com/cilium/cilium/pkg/api/const.go b/vendor/github.com/cilium/cilium/pkg/api/const.go deleted file mode 100644 index 356c6ddb3..000000000 --- a/vendor/github.com/cilium/cilium/pkg/api/const.go +++ /dev/null @@ -1,18 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package api - -import ( - "os" - "time" -) - -const ( - // CiliumGroupName is the cilium's unix group name. - CiliumGroupName = "cilium" - // SocketFileMode is the default file mode for the sockets. - SocketFileMode os.FileMode = 0660 - // ClientTimeout specifies timeout to be used by clients - ClientTimeout = 90 * time.Second -) diff --git a/vendor/github.com/cilium/cilium/pkg/api/doc.go b/vendor/github.com/cilium/cilium/pkg/api/doc.go deleted file mode 100644 index 83b4b5bfb..000000000 --- a/vendor/github.com/cilium/cilium/pkg/api/doc.go +++ /dev/null @@ -1,5 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -// Package api provides the Cilium useful helpers for the external API -package api diff --git a/vendor/github.com/cilium/cilium/pkg/api/socket.go b/vendor/github.com/cilium/cilium/pkg/api/socket.go deleted file mode 100644 index 6af237a1e..000000000 --- a/vendor/github.com/cilium/cilium/pkg/api/socket.go +++ /dev/null @@ -1,49 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package api - -import ( - "fmt" - "os" - "os/user" - "strconv" - - "github.com/sirupsen/logrus" - - "github.com/cilium/cilium/pkg/logging" - "github.com/cilium/cilium/pkg/logging/logfields" -) - -var log = logging.DefaultLogger.WithField(logfields.LogSubsys, "api") - -// getGroupIDByName returns the group ID for the given grpName. -func getGroupIDByName(grpName string) (int, error) { - group, err := user.LookupGroup(grpName) - if err != nil { - return -1, err - } - return strconv.Atoi(group.Gid) -} - -// SetDefaultPermissions sets the given socket's group to `CiliumGroupName` and -// mode to `SocketFileMode`. 
-func SetDefaultPermissions(socketPath string) error { - gid, err := getGroupIDByName(CiliumGroupName) - if err != nil { - log.WithError(err).WithFields(logrus.Fields{ - logfields.Path: socketPath, - "group": CiliumGroupName, - }).Debug("Group not found") - } else { - if err := os.Chown(socketPath, 0, gid); err != nil { - return fmt.Errorf("failed while setting up %s's group ID"+ - " in %q: %s", CiliumGroupName, socketPath, err) - } - } - if err := os.Chmod(socketPath, SocketFileMode); err != nil { - return fmt.Errorf("failed while setting up file permissions in %q: %w", - socketPath, err) - } - return nil -} diff --git a/vendor/github.com/cilium/cilium/pkg/client/client.go b/vendor/github.com/cilium/cilium/pkg/client/client.go deleted file mode 100644 index 18483ef2b..000000000 --- a/vendor/github.com/cilium/cilium/pkg/client/client.go +++ /dev/null @@ -1,701 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package client - -import ( - "context" - "errors" - "fmt" - "io" - "net" - "net/http" - "net/url" - "os" - "sort" - "strings" - "text/tabwriter" - "time" - - runtime_client "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" - - clientapi "github.com/cilium/cilium/api/v1/client" - "github.com/cilium/cilium/api/v1/models" - "github.com/cilium/cilium/pkg/defaults" -) - -type Client struct { - clientapi.CiliumAPI -} - -// DefaultSockPath returns default UNIX domain socket path or -// path set using CILIUM_SOCK env variable -func DefaultSockPath() string { - // Check if environment variable points to socket - e := os.Getenv(defaults.SockPathEnv) - if e == "" { - // If unset, fall back to default value - e = defaults.SockPath - } - return "unix://" + e - -} - -func configureTransport(tr *http.Transport, proto, addr string) *http.Transport { - if tr == nil { - tr = &http.Transport{} - } - - if proto == "unix" { - // No need for compression in local communications. - tr.DisableCompression = true - tr.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) { - return net.Dial(proto, addr) - } - } else { - tr.Proxy = http.ProxyFromEnvironment - tr.DialContext = (&net.Dialer{}).DialContext - } - - return tr -} - -// NewDefaultClient creates a client with default parameters connecting to UNIX domain socket. -func NewDefaultClient() (*Client, error) { - return NewClient("") -} - -// NewDefaultClientWithTimeout creates a client with default parameters connecting to UNIX -// domain socket and waits for cilium-agent availability. -func NewDefaultClientWithTimeout(timeout time.Duration) (*Client, error) { - timeoutAfter := time.After(timeout) - var c *Client - var err error - for { - select { - case <-timeoutAfter: - return nil, fmt.Errorf("failed to create cilium agent client after %f seconds timeout: %s", timeout.Seconds(), err) - default: - } - - c, err = NewDefaultClient() - if err != nil { - time.Sleep(500 * time.Millisecond) - continue - } - - for { - select { - case <-timeoutAfter: - return nil, fmt.Errorf("failed to create cilium agent client after %f seconds timeout: %s", timeout.Seconds(), err) - default: - } - // This is an API call that we do to the cilium-agent to check - // if it is up and running. - _, err = c.Daemon.GetConfig(nil) - if err != nil { - time.Sleep(500 * time.Millisecond) - continue - } - return c, nil - } - } -} - -// NewClient creates a client for the given `host`. 
-// If host is nil then use SockPath provided by CILIUM_SOCK -// or the cilium default SockPath -func NewClient(host string) (*Client, error) { - clientTrans, err := NewRuntime(host) - return &Client{*clientapi.New(clientTrans, strfmt.Default)}, err -} - -func NewRuntime(host string) (*runtime_client.Runtime, error) { - if host == "" { - host = DefaultSockPath() - } - tmp := strings.SplitN(host, "://", 2) - if len(tmp) != 2 { - return nil, fmt.Errorf("invalid host format '%s'", host) - } - - switch tmp[0] { - case "tcp": - if _, err := url.Parse("tcp://" + tmp[1]); err != nil { - return nil, err - } - host = "http://" + tmp[1] - case "unix": - host = tmp[1] - } - - transport := configureTransport(nil, tmp[0], host) - httpClient := &http.Client{Transport: transport} - clientTrans := runtime_client.NewWithClient(tmp[1], clientapi.DefaultBasePath, - clientapi.DefaultSchemes, httpClient) - return clientTrans, nil -} - -// Hint tries to improve the error message displayed to the user. -func Hint(err error) error { - if err == nil { - return err - } - - if errors.Is(err, context.DeadlineExceeded) { - return fmt.Errorf("Cilium API client timeout exceeded") - } - - e, _ := url.PathUnescape(err.Error()) - if strings.Contains(err.Error(), defaults.SockPath) { - return fmt.Errorf("%s\nIs the agent running?", e) - } - return fmt.Errorf("%s", e) -} - -func timeSince(since time.Time) string { - out := "never" - if !since.IsZero() { - t := time.Since(since) - out = t.Truncate(time.Second).String() + " ago" - } - - return out -} - -func stateUnhealthy(state string) bool { - return state == models.StatusStateWarning || - state == models.StatusStateFailure -} - -func statusUnhealthy(s *models.Status) bool { - if s != nil { - return stateUnhealthy(s.State) - } - return false -} - -// FormatStatusResponseBrief writes a one-line status to the writer. If -// everything ok, this is "ok", otherwise a message of the form "error in ..." -func FormatStatusResponseBrief(w io.Writer, sr *models.StatusResponse) { - msg := "" - - switch { - case statusUnhealthy(sr.Kvstore): - msg = fmt.Sprintf("kvstore: %s", sr.Kvstore.Msg) - case statusUnhealthy(sr.ContainerRuntime): - msg = fmt.Sprintf("container runtime: %s", sr.ContainerRuntime.Msg) - case sr.Kubernetes != nil && stateUnhealthy(sr.Kubernetes.State): - msg = fmt.Sprintf("kubernetes: %s", sr.Kubernetes.Msg) - case statusUnhealthy(sr.Cilium): - msg = fmt.Sprintf("cilium: %s", sr.Cilium.Msg) - case sr.Cluster != nil && statusUnhealthy(sr.Cluster.CiliumHealth): - msg = fmt.Sprintf("cilium-health: %s", sr.Cluster.CiliumHealth.Msg) - } - - // Only bother looking at controller failures if everything else is ok - if msg == "" { - for _, ctrl := range sr.Controllers { - if ctrl.Status == nil { - continue - } - if ctrl.Status.LastFailureMsg != "" { - msg = fmt.Sprintf("controller %s: %s", - ctrl.Name, ctrl.Status.LastFailureMsg) - break - } - } - } - - if msg == "" { - fmt.Fprintf(w, "OK\n") - } else { - fmt.Fprintf(w, "error in %s\n", msg) - } -} - -func clusterReadiness(cluster *models.RemoteCluster) string { - if !cluster.Ready { - return "not-ready" - } - return "ready" -} - -func numReadyClusters(clustermesh *models.ClusterMeshStatus) int { - numReady := 0 - for _, cluster := range clustermesh.Clusters { - if cluster.Ready { - numReady++ - } - } - return numReady -} - -type StatusDetails struct { - // AllAddress causes all addresses to be printed by FormatStatusResponse. - AllAddresses bool - // AllControllers causes all controllers to be printed by FormatStatusResponse. 
- AllControllers bool - // AllNodes causes all nodes to be printed by FormatStatusResponse. - AllNodes bool - // AllRedirects causes all redirects to be printed by FormatStatusResponse. - AllRedirects bool - // AllClusters causes all clusters to be printed by FormatStatusResponse. - AllClusters bool - // BPFMapDetails causes BPF map details to be printed by FormatStatusResponse. - BPFMapDetails bool - // KubeProxyReplacementDetails causes BPF kube-proxy details to be printed by FormatStatusResponse. - KubeProxyReplacementDetails bool - // ClockSourceDetails causes BPF time-keeping internals to be printed by FormatStatusResponse. - ClockSourceDetails bool -} - -var ( - // StatusAllDetails causes no additional status details to be printed by - // FormatStatusResponse. - StatusNoDetails = StatusDetails{} - // StatusAllDetails causes all status details to be printed by FormatStatusResponse. - StatusAllDetails = StatusDetails{ - AllAddresses: true, - AllControllers: true, - AllNodes: true, - AllRedirects: true, - AllClusters: true, - BPFMapDetails: true, - KubeProxyReplacementDetails: true, - ClockSourceDetails: true, - } -) - -// FormatStatusResponse writes a StatusResponse as a string to the writer. The bit mask sd controls -// whether a additional details are printed about a certain aspect of the status. In case there are -// errors, some details may be printed regardless of the value of sd. -func FormatStatusResponse(w io.Writer, sr *models.StatusResponse, sd StatusDetails) { - if sr.Kvstore != nil { - fmt.Fprintf(w, "KVStore:\t%s\t%s\n", sr.Kvstore.State, sr.Kvstore.Msg) - } - if sr.ContainerRuntime != nil { - fmt.Fprintf(w, "ContainerRuntime:\t%s\t%s\n", - sr.ContainerRuntime.State, sr.ContainerRuntime.Msg) - } - - kubeProxyDevices := "" - if sr.Kubernetes != nil { - fmt.Fprintf(w, "Kubernetes:\t%s\t%s\n", sr.Kubernetes.State, sr.Kubernetes.Msg) - if sr.Kubernetes.State != models.K8sStatusStateDisabled { - sort.Strings(sr.Kubernetes.K8sAPIVersions) - fmt.Fprintf(w, "Kubernetes APIs:\t[\"%s\"]\n", strings.Join(sr.Kubernetes.K8sAPIVersions, "\", \"")) - } - - } - if sr.KubeProxyReplacement != nil { - devices := "" - if sr.KubeProxyReplacement.Mode != models.KubeProxyReplacementModeDisabled { - for i, dev := range sr.KubeProxyReplacement.DeviceList { - kubeProxyDevices += fmt.Sprintf("%s %s", dev.Name, strings.Join(dev.IP, " ")) - if dev.Name == sr.KubeProxyReplacement.DirectRoutingDevice { - kubeProxyDevices += " (Direct Routing)" - } - if i+1 != len(sr.KubeProxyReplacement.Devices) { - kubeProxyDevices += ", " - } - } - if len(sr.KubeProxyReplacement.DeviceList) > 0 { - devices = "[" + kubeProxyDevices + "]" - } - } - fmt.Fprintf(w, "KubeProxyReplacement:\t%s\t%s\n", - sr.KubeProxyReplacement.Mode, devices) - } - if sr.HostFirewall != nil { - fmt.Fprintf(w, "Host firewall:\t%s", sr.HostFirewall.Mode) - if sr.HostFirewall.Mode != models.HostFirewallModeDisabled { - fmt.Fprintf(w, "\t[%s]", strings.Join(sr.HostFirewall.Devices, ", ")) - } - fmt.Fprintf(w, "\n") - } - - if sr.CniChaining != nil { - fmt.Fprintf(w, "CNI Chaining:\t%s\n", sr.CniChaining.Mode) - } - - if sr.CniFile != nil { - fmt.Fprintf(w, "CNI Config file:\t%s\n", sr.CniFile.Msg) - } - - if sr.Cilium != nil { - fmt.Fprintf(w, "Cilium:\t%s %s\n", sr.Cilium.State, sr.Cilium.Msg) - } - - if sr.Stale != nil { - sortedProbes := make([]string, 0, len(sr.Stale)) - for probe := range sr.Stale { - sortedProbes = append(sortedProbes, probe) - } - sort.Strings(sortedProbes) - - stalesStr := make([]string, 0, len(sr.Stale)) - for _, probe 
:= range sortedProbes { - stalesStr = append(stalesStr, fmt.Sprintf("%q since %s", probe, sr.Stale[probe])) - } - - fmt.Fprintf(w, "Stale status:\t%s\n", strings.Join(stalesStr, ", ")) - } - - if nm := sr.NodeMonitor; nm != nil { - fmt.Fprintf(w, "NodeMonitor:\tListening for events on %d CPUs with %dx%d of shared memory\n", - nm.Cpus, nm.Npages, nm.Pagesize) - if nm.Lost != 0 || nm.Unknown != 0 { - fmt.Fprintf(w, "\t%d events lost, %d unknown notifications\n", nm.Lost, nm.Unknown) - } - } else { - fmt.Fprintf(w, "NodeMonitor:\tDisabled\n") - } - - if sr.Cluster != nil { - if sr.Cluster.CiliumHealth != nil { - ch := sr.Cluster.CiliumHealth - fmt.Fprintf(w, "Cilium health daemon:\t%s\t%s\n", ch.State, ch.Msg) - } - } - - if sr.Ipam != nil { - fmt.Fprintf(w, "IPAM:\t%s\n", sr.Ipam.Status) - if sd.AllAddresses { - fmt.Fprintf(w, "Allocated addresses:\n") - out := make([]string, 0, len(sr.Ipam.Allocations)) - for ip, owner := range sr.Ipam.Allocations { - out = append(out, fmt.Sprintf(" %s (%s)", ip, owner)) - } - sort.Strings(out) - for _, line := range out { - fmt.Fprintln(w, line) - } - } - } - - if sr.ClusterMesh != nil { - fmt.Fprintf(w, "ClusterMesh:\t%d/%d clusters ready, %d global-services\n", - numReadyClusters(sr.ClusterMesh), len(sr.ClusterMesh.Clusters), sr.ClusterMesh.NumGlobalServices) - - for _, cluster := range sr.ClusterMesh.Clusters { - if sd.AllClusters || !cluster.Ready { - fmt.Fprintf(w, " %s: %s, %d nodes, %d identities, %d services, %d failures (last: %s)\n", - cluster.Name, clusterReadiness(cluster), cluster.NumNodes, - cluster.NumIdentities, cluster.NumSharedServices, - cluster.NumFailures, timeSince(time.Time(cluster.LastFailure))) - fmt.Fprintf(w, " └ %s\n", cluster.Status) - } - } - } - - if sr.IPV6BigTCP != nil { - status := "Enabled" - if !sr.IPV6BigTCP.Enabled { - status = "Disabled" - } - fmt.Fprintf(w, "IPv6 BIG TCP:\t%s\n", status) - } - - if sr.BandwidthManager != nil { - var status string - if !sr.BandwidthManager.Enabled { - status = "Disabled" - } else { - status = fmt.Sprintf("EDT with BPF [%s] [%s]", - strings.ToUpper(sr.BandwidthManager.CongestionControl), - strings.Join(sr.BandwidthManager.Devices, ", ")) - } - fmt.Fprintf(w, "BandwidthManager:\t%s\n", status) - } - - if sr.HostRouting != nil { - fmt.Fprintf(w, "Host Routing:\t%s\n", sr.HostRouting.Mode) - } - - if sr.Masquerading != nil { - var status string - - enabled := func(enabled bool) string { - if enabled { - return "Enabled" - } - return "Disabled" - } - - if sr.Masquerading.EnabledProtocols == nil { - status = enabled(sr.Masquerading.Enabled) - } else if !sr.Masquerading.EnabledProtocols.IPV4 && !sr.Masquerading.EnabledProtocols.IPV6 { - status = enabled(false) - } else { - if sr.Masquerading.Mode == models.MasqueradingModeBPF { - if sr.Masquerading.IPMasqAgent { - status = "BPF (ip-masq-agent)" - } else { - status = "BPF" - } - if sr.KubeProxyReplacement != nil { - // When BPF Masquerading is enabled we don't do any masquerading for IPv6 - // traffic so no SNAT Exclusion IPv6 CIDR is listed in status output. 
- devStr := "" - for i, dev := range sr.KubeProxyReplacement.DeviceList { - devStr += dev.Name - if i+1 != len(sr.KubeProxyReplacement.DeviceList) { - devStr += ", " - } - } - status += fmt.Sprintf("\t[%s]\t%s", - devStr, - sr.Masquerading.SnatExclusionCidrV4) - } - - } else if sr.Masquerading.Mode == models.MasqueradingModeIptables { - status = "IPTables" - } - - status = fmt.Sprintf("%s [IPv4: %s, IPv6: %s]", status, - enabled(sr.Masquerading.EnabledProtocols.IPV4), enabled(sr.Masquerading.EnabledProtocols.IPV6)) - } - fmt.Fprintf(w, "Masquerading:\t%s\n", status) - } - - if sd.ClockSourceDetails && sr.ClockSource != nil { - status := sr.ClockSource.Mode - if sr.ClockSource.Mode == models.ClockSourceModeJiffies { - status = fmt.Sprintf("%s\t[%d Hz]", - sr.ClockSource.Mode, sr.ClockSource.Hertz) - } - fmt.Fprintf(w, "Clock Source for BPF:\t%s\n", status) - } - - if sr.Controllers != nil { - nFailing, out := 0, []string{" Name\tLast success\tLast error\tCount\tMessage\n"} - for _, ctrl := range sr.Controllers { - status := ctrl.Status - if status == nil { - continue - } - - if status.ConsecutiveFailureCount > 0 { - nFailing++ - } else if !sd.AllControllers { - continue - } - - failSince := timeSince(time.Time(status.LastFailureTimestamp)) - successSince := timeSince(time.Time(status.LastSuccessTimestamp)) - - err := "no error" - if status.LastFailureMsg != "" { - err = status.LastFailureMsg - } - - out = append(out, fmt.Sprintf(" %s\t%s\t%s\t%d\t%s\t\n", - ctrl.Name, successSince, failSince, status.ConsecutiveFailureCount, err)) - } - - nOK := len(sr.Controllers) - nFailing - fmt.Fprintf(w, "Controller Status:\t%d/%d healthy\n", nOK, len(sr.Controllers)) - if len(out) > 1 { - tab := tabwriter.NewWriter(w, 0, 0, 3, ' ', 0) - sort.Strings(out) - for _, s := range out { - fmt.Fprint(tab, s) - } - tab.Flush() - } - - } - - if sr.Proxy != nil { - fmt.Fprintf(w, "Proxy Status:\tOK, ip %s, %d redirects active on ports %s\n", - sr.Proxy.IP, sr.Proxy.TotalRedirects, sr.Proxy.PortRange) - if sd.AllRedirects && sr.Proxy.TotalRedirects > 0 { - out := make([]string, 0, len(sr.Proxy.Redirects)+1) - for _, r := range sr.Proxy.Redirects { - out = append(out, fmt.Sprintf(" %s\t%s\t%d\n", r.Proxy, r.Name, r.ProxyPort)) - } - tab := tabwriter.NewWriter(w, 0, 0, 3, ' ', 0) - fmt.Fprint(tab, " Protocol\tRedirect\tProxy Port\n") - sort.Strings(out) - for _, s := range out { - fmt.Fprint(tab, s) - } - tab.Flush() - } - } else { - fmt.Fprintf(w, "Proxy Status:\tNo managed proxy redirect\n") - } - - if sr.IdentityRange != nil { - fmt.Fprintf(w, "Global Identity Range:\tmin %d, max %d\n", - sr.IdentityRange.MinIdentity, sr.IdentityRange.MaxIdentity) - } else { - fmt.Fprintf(w, "Global Identity Range:\tUnknown\n") - } - - if sr.Hubble != nil { - var fields []string - - state := sr.Hubble.State - if sr.Hubble.Msg != "" { - state = fmt.Sprintf("%s %s", state, sr.Hubble.Msg) - } - fields = append(fields, state) - - if o := sr.Hubble.Observer; o != nil { - var observer []string - - if o.MaxFlows > 0 { - observer = append(observer, fmt.Sprintf("Current/Max Flows: %d/%d (%.2f%%)", - o.CurrentFlows, o.MaxFlows, (float64(o.CurrentFlows)/float64(o.MaxFlows))*100)) - } - if o.Uptime > 0 { - observer = append(observer, fmt.Sprintf("Flows/s: %.2f", - float64(o.SeenFlows)/time.Duration(o.Uptime).Seconds())) - } - - fields = append(fields, strings.Join(observer, ", ")) - } - - if sr.Hubble.Metrics != nil { - fields = append(fields, fmt.Sprintf("Metrics: %s", sr.Hubble.Metrics.State)) - } - - fmt.Fprintf(w, "Hubble:\t%s\n", 
strings.Join(fields, "\t")) - } - - if sd.KubeProxyReplacementDetails && sr.Kubernetes != nil && sr.KubeProxyReplacement != nil { - var selection, mode, xdp string - - lb := "Disabled" - cIP := "Enabled" - nPort := "Disabled" - if np := sr.KubeProxyReplacement.Features.NodePort; np.Enabled { - selection = np.Algorithm - if selection == models.KubeProxyReplacementFeaturesNodePortAlgorithmMaglev { - selection = fmt.Sprintf("%s (Table Size: %d)", np.Algorithm, np.LutSize) - } - xdp = np.Acceleration - mode = np.Mode - nPort = fmt.Sprintf("Enabled (Range: %d-%d)", np.PortMin, np.PortMax) - lb = "Enabled" - } - - affinity := "Disabled" - if sr.KubeProxyReplacement.Features.SessionAffinity.Enabled { - affinity = "Enabled" - } - - hPort := "Disabled" - if sr.KubeProxyReplacement.Features.HostPort.Enabled { - hPort = "Enabled" - } - - eIP := "Disabled" - if sr.KubeProxyReplacement.Features.ExternalIPs.Enabled { - eIP = "Enabled" - } - - socketLB := "Disabled" - if slb := sr.KubeProxyReplacement.Features.SocketLB; slb.Enabled { - socketLB = "Enabled" - } - - socketLBTracing := "Disabled" - if st := sr.KubeProxyReplacement.Features.SocketLBTracing; st.Enabled { - socketLBTracing = "Enabled" - } - - gracefulTerm := "Disabled" - if sr.KubeProxyReplacement.Features.GracefulTermination.Enabled { - gracefulTerm = "Enabled" - } - - nat46X64 := "Disabled" - nat46X64GW := "Disabled" - nat46X64SVC := "Disabled" - prefixes := "" - if sr.KubeProxyReplacement.Features.Nat46X64.Enabled { - nat46X64 = "Enabled" - if svc := sr.KubeProxyReplacement.Features.Nat46X64.Service; svc.Enabled { - nat46X64SVC = "Enabled" - } - if gw := sr.KubeProxyReplacement.Features.Nat46X64.Gateway; gw.Enabled { - nat46X64GW = "Enabled" - prefixes = strings.Join(gw.Prefixes, ", ") - } - } - - fmt.Fprintf(w, "KubeProxyReplacement Details:\n") - tab := tabwriter.NewWriter(w, 0, 0, 3, ' ', 0) - fmt.Fprintf(tab, " Status:\t%s\n", sr.KubeProxyReplacement.Mode) - fmt.Fprintf(tab, " Socket LB:\t%s\n", socketLB) - fmt.Fprintf(tab, " Socket LB Tracing:\t%s\n", socketLBTracing) - if kubeProxyDevices != "" { - fmt.Fprintf(tab, " Devices:\t%s\n", kubeProxyDevices) - } - if mode != "" { - fmt.Fprintf(tab, " Mode:\t%s\n", mode) - } - if selection != "" { - fmt.Fprintf(tab, " Backend Selection:\t%s\n", selection) - } - fmt.Fprintf(tab, " Session Affinity:\t%s\n", affinity) - fmt.Fprintf(tab, " Graceful Termination:\t%s\n", gracefulTerm) - if nat46X64 == "Disabled" { - fmt.Fprintf(tab, " NAT46/64 Support:\t%s\n", nat46X64) - } else { - fmt.Fprintf(tab, " NAT46/64 Support:\n") - fmt.Fprintf(tab, " - Services:\t%s\n", nat46X64SVC) - fmt.Fprintf(tab, " - Gateway:\t%s\n", nat46X64GW) - if nat46X64GW == "Enabled" && prefixes != "" { - fmt.Fprintf(tab, " Prefixes:\t%s\n", prefixes) - } - } - if xdp != "" { - fmt.Fprintf(tab, " XDP Acceleration:\t%s\n", xdp) - } - fmt.Fprintf(tab, " Services:\n") - fmt.Fprintf(tab, " - ClusterIP:\t%s\n", cIP) - fmt.Fprintf(tab, " - NodePort:\t%s \n", nPort) - fmt.Fprintf(tab, " - LoadBalancer:\t%s \n", lb) - fmt.Fprintf(tab, " - externalIPs:\t%s \n", eIP) - fmt.Fprintf(tab, " - HostPort:\t%s\n", hPort) - tab.Flush() - } - - if sd.BPFMapDetails && sr.BpfMaps != nil { - dynamicSizingStatus := "off" - ratio := sr.BpfMaps.DynamicSizeRatio - if 0.0 < ratio && ratio <= 1.0 { - dynamicSizingStatus = fmt.Sprintf("on (ratio: %f)", ratio) - } - fmt.Fprintf(w, "BPF Maps:\tdynamic sizing: %s\n", dynamicSizingStatus) - tab := tabwriter.NewWriter(w, 0, 0, 3, ' ', 0) - fmt.Fprintf(tab, " Name\tSize\n") - for _, m := range sr.BpfMaps.Maps { 
- fmt.Fprintf(tab, " %s\t%d\n", m.Name, m.Size) - } - tab.Flush() - } - - if sr.Encryption != nil { - fields := []string{sr.Encryption.Mode} - - if sr.Encryption.Msg != "" { - fields = append(fields, sr.Encryption.Msg) - } else if wg := sr.Encryption.Wireguard; wg != nil { - ifaces := make([]string, 0, len(wg.Interfaces)) - for _, i := range wg.Interfaces { - iface := fmt.Sprintf("%s (Pubkey: %s, Port: %d, Peers: %d)", - i.Name, i.PublicKey, i.ListenPort, i.PeerCount) - ifaces = append(ifaces, iface) - } - fields = append(fields, fmt.Sprintf("[%s]", strings.Join(ifaces, ", "))) - } - - fmt.Fprintf(w, "Encryption:\t%s\n", strings.Join(fields, "\t")) - } -} diff --git a/vendor/github.com/cilium/cilium/pkg/client/config.go b/vendor/github.com/cilium/cilium/pkg/client/config.go deleted file mode 100644 index 3775abe2e..000000000 --- a/vendor/github.com/cilium/cilium/pkg/client/config.go +++ /dev/null @@ -1,38 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package client - -import ( - "github.com/cilium/cilium/api/v1/client/daemon" - "github.com/cilium/cilium/api/v1/models" - "github.com/cilium/cilium/pkg/api" -) - -// ConfigGet returns a daemon configuration. -func (c *Client) ConfigGet() (*models.DaemonConfiguration, error) { - resp, err := c.Daemon.GetConfig(nil) - if err != nil { - return nil, Hint(err) - } - return resp.Payload, nil -} - -// ConfigPatch modifies the daemon configuration. -func (c *Client) ConfigPatch(cfg models.DaemonConfigurationSpec) error { - fullCfg, err := c.ConfigGet() - if err != nil { - return err - } - - for opt, value := range cfg.Options { - fullCfg.Spec.Options[opt] = value - } - if cfg.PolicyEnforcement != "" { - fullCfg.Spec.PolicyEnforcement = cfg.PolicyEnforcement - } - - params := daemon.NewPatchConfigParams().WithConfiguration(fullCfg.Spec).WithTimeout(api.ClientTimeout) - _, err = c.Daemon.PatchConfig(params) - return Hint(err) -} diff --git a/vendor/github.com/cilium/cilium/pkg/client/endpoint.go b/vendor/github.com/cilium/cilium/pkg/client/endpoint.go deleted file mode 100644 index 6fb289e45..000000000 --- a/vendor/github.com/cilium/cilium/pkg/client/endpoint.go +++ /dev/null @@ -1,141 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package client - -import ( - "github.com/cilium/cilium/api/v1/client/endpoint" - "github.com/cilium/cilium/api/v1/models" - "github.com/cilium/cilium/pkg/api" - pkgEndpointID "github.com/cilium/cilium/pkg/endpoint/id" - "github.com/cilium/cilium/pkg/labels" -) - -// EndpointList returns a list of all endpoints -func (c *Client) EndpointList() ([]*models.Endpoint, error) { - resp, err := c.Endpoint.GetEndpoint(nil) - if err != nil { - return nil, Hint(err) - } - return resp.Payload, nil -} - -// EndpointGet returns endpoint by ID -func (c *Client) EndpointGet(id string) (*models.Endpoint, error) { - params := endpoint.NewGetEndpointIDParams().WithID(id).WithTimeout(api.ClientTimeout) - resp, err := c.Endpoint.GetEndpointID(params) - if err != nil { - /* Since plugins rely on checking the error type, we don't wrap this - * with Hint(...) 
- */ - return nil, err - } - return resp.Payload, nil -} - -// EndpointCreate creates a new endpoint -func (c *Client) EndpointCreate(ep *models.EndpointChangeRequest) error { - id := pkgEndpointID.NewCiliumID(ep.ID) - params := endpoint.NewPutEndpointIDParams().WithID(id).WithEndpoint(ep).WithTimeout(api.ClientTimeout) - _, err := c.Endpoint.PutEndpointID(params) - return Hint(err) -} - -// EndpointPatch modifies the endpoint -func (c *Client) EndpointPatch(id string, ep *models.EndpointChangeRequest) error { - params := endpoint.NewPatchEndpointIDParams().WithID(id).WithEndpoint(ep).WithTimeout(api.ClientTimeout) - _, err := c.Endpoint.PatchEndpointID(params) - return Hint(err) -} - -// EndpointDelete deletes endpoint -func (c *Client) EndpointDelete(id string) error { - params := endpoint.NewDeleteEndpointIDParams().WithID(id).WithTimeout(api.ClientTimeout) - _, _, err := c.Endpoint.DeleteEndpointID(params) - return Hint(err) -} - -// EndpointLogGet returns endpoint log -func (c *Client) EndpointLogGet(id string) (models.EndpointStatusLog, error) { - params := endpoint.NewGetEndpointIDLogParams().WithID(id).WithTimeout(api.ClientTimeout) - resp, err := c.Endpoint.GetEndpointIDLog(params) - if err != nil { - return nil, Hint(err) - } - return resp.Payload, nil -} - -// EndpointHealthGet returns endpoint healthz -func (c *Client) EndpointHealthGet(id string) (*models.EndpointHealth, error) { - params := endpoint.NewGetEndpointIDHealthzParams().WithID(id).WithTimeout(api.ClientTimeout) - resp, err := c.Endpoint.GetEndpointIDHealthz(params) - if err != nil { - return nil, Hint(err) - } - return resp.Payload, nil -} - -// EndpointConfigGet returns endpoint configuration -func (c *Client) EndpointConfigGet(id string) (*models.EndpointConfigurationStatus, error) { - params := endpoint.NewGetEndpointIDConfigParams().WithID(id).WithTimeout(api.ClientTimeout) - resp, err := c.Endpoint.GetEndpointIDConfig(params) - if err != nil { - return nil, Hint(err) - } - return resp.Payload, nil -} - -// EndpointConfigPatch modifies endpoint configuration -func (c *Client) EndpointConfigPatch(id string, cfg *models.EndpointConfigurationSpec) error { - params := endpoint.NewPatchEndpointIDConfigParams().WithID(id).WithTimeout(api.ClientTimeout) - if cfg != nil { - params.SetEndpointConfiguration(cfg) - } - - _, err := c.Endpoint.PatchEndpointIDConfig(params) - return Hint(err) -} - -// EndpointLabelsGet returns endpoint label configuration -func (c *Client) EndpointLabelsGet(id string) (*models.LabelConfiguration, error) { - params := endpoint.NewGetEndpointIDLabelsParams().WithID(id).WithTimeout(api.ClientTimeout) - resp, err := c.Endpoint.GetEndpointIDLabels(params) - if err != nil { - return nil, Hint(err) - } - return resp.Payload, nil -} - -// EndpointLabelsPut modifies endpoint label configuration -// add: List of labels to add and enable. If the label is an orchestration -// system label which has been disabled before, it will be removed from -// the disabled list and readded to the orchestration list. Otherwise -// it will be added to the custom label list. -// -// delete: List of labels to delete. If the label is an orchestration system -// label, then it will be deleted from the orchestration list and -// added to the disabled list. Otherwise it will be removed from the -// custom list. 
-func (c *Client) EndpointLabelsPatch(id string, toAdd, toDelete models.Labels) error { - currentCfg, err := c.EndpointLabelsGet(id) - if err != nil { - return err - } - - userLbl := labels.NewLabelsFromModel(currentCfg.Status.Realized.User) - for _, lbl := range toAdd { - lblParsed := labels.ParseLabel(lbl) - if _, found := userLbl[lblParsed.Key]; !found { - userLbl[lblParsed.Key] = lblParsed - } - } - for _, lbl := range toDelete { - lblParsed := labels.ParseLabel(lbl) - delete(userLbl, lblParsed.Key) - } - currentCfg.Spec.User = userLbl.GetModel() - - params := endpoint.NewPatchEndpointIDLabelsParams().WithID(id).WithTimeout(api.ClientTimeout) - _, err = c.Endpoint.PatchEndpointIDLabels(params.WithConfiguration(currentCfg.Spec)) - return Hint(err) -} diff --git a/vendor/github.com/cilium/cilium/pkg/client/identity.go b/vendor/github.com/cilium/cilium/pkg/client/identity.go deleted file mode 100644 index 483509ea7..000000000 --- a/vendor/github.com/cilium/cilium/pkg/client/identity.go +++ /dev/null @@ -1,21 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package client - -import ( - "github.com/cilium/cilium/api/v1/client/policy" - "github.com/cilium/cilium/api/v1/models" - "github.com/cilium/cilium/pkg/api" -) - -// IdentityGet returns a security identity. -func (c *Client) IdentityGet(id string) (*models.Identity, error) { - params := policy.NewGetIdentityIDParams().WithID(id).WithTimeout(api.ClientTimeout) - - resp, err := c.Policy.GetIdentityID(params) - if err != nil { - return nil, Hint(err) - } - return resp.Payload, nil -} diff --git a/vendor/github.com/cilium/cilium/pkg/client/ipam.go b/vendor/github.com/cilium/cilium/pkg/client/ipam.go deleted file mode 100644 index e50bbfb51..000000000 --- a/vendor/github.com/cilium/cilium/pkg/client/ipam.go +++ /dev/null @@ -1,57 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package client - -import ( - "github.com/cilium/cilium/api/v1/client/ipam" - "github.com/cilium/cilium/api/v1/models" - "github.com/cilium/cilium/pkg/api" -) - -const ( - AddressFamilyIPv6 = "ipv6" - AddressFamilyIPv4 = "ipv4" -) - -// IPAMAllocate allocates an IP address out of address family specific pool. -func (c *Client) IPAMAllocate(family, owner, pool string, expiration bool) (*models.IPAMResponse, error) { - params := ipam.NewPostIpamParams().WithTimeout(api.ClientTimeout) - - if family != "" { - params.SetFamily(&family) - } - if owner != "" { - params.SetOwner(&owner) - } - if pool != "" { - params.SetPool(&pool) - } - params.SetExpiration(&expiration) - - resp, err := c.Ipam.PostIpam(params) - if err != nil { - return nil, Hint(err) - } - return resp.Payload, nil -} - -// IPAMAllocateIP tries to allocate a particular IP address. -func (c *Client) IPAMAllocateIP(ip, owner, pool string) error { - params := ipam.NewPostIpamIPParams().WithIP(ip).WithOwner(&owner).WithTimeout(api.ClientTimeout) - if pool != "" { - params.SetPool(&pool) - } - _, err := c.Ipam.PostIpamIP(params) - return Hint(err) -} - -// IPAMReleaseIP releases a IP address back to the pool. 
-func (c *Client) IPAMReleaseIP(ip, pool string) error { - params := ipam.NewDeleteIpamIPParams().WithIP(ip).WithTimeout(api.ClientTimeout) - if pool != "" { - params.SetPool(&pool) - } - _, err := c.Ipam.DeleteIpamIP(params) - return Hint(err) -} diff --git a/vendor/github.com/cilium/cilium/pkg/client/lrp.go b/vendor/github.com/cilium/cilium/pkg/client/lrp.go deleted file mode 100644 index c1108b497..000000000 --- a/vendor/github.com/cilium/cilium/pkg/client/lrp.go +++ /dev/null @@ -1,17 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package client - -import ( - "github.com/cilium/cilium/api/v1/models" -) - -// GetLRPs returns a list of all local redirect policies. -func (c *Client) GetLRPs() ([]*models.LRPSpec, error) { - resp, err := c.Service.GetLrp(nil) - if err != nil { - return nil, Hint(err) - } - return resp.Payload, nil -} diff --git a/vendor/github.com/cilium/cilium/pkg/client/policy.go b/vendor/github.com/cilium/cilium/pkg/client/policy.go deleted file mode 100644 index 5930c1766..000000000 --- a/vendor/github.com/cilium/cilium/pkg/client/policy.go +++ /dev/null @@ -1,50 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package client - -import ( - "github.com/cilium/cilium/api/v1/client/policy" - "github.com/cilium/cilium/api/v1/models" - "github.com/cilium/cilium/pkg/api" -) - -// PolicyPut inserts the `policyJSON` -func (c *Client) PolicyPut(policyJSON string) (*models.Policy, error) { - params := policy.NewPutPolicyParams().WithPolicy(policyJSON).WithTimeout(api.ClientTimeout) - resp, err := c.Policy.PutPolicy(params) - if err != nil { - return nil, Hint(err) - } - return resp.Payload, nil -} - -// PolicyGet returns policy rules -func (c *Client) PolicyGet(labels []string) (*models.Policy, error) { - params := policy.NewGetPolicyParams().WithLabels(labels).WithTimeout(api.ClientTimeout) - resp, err := c.Policy.GetPolicy(params) - if err != nil { - return nil, Hint(err) - } - return resp.Payload, nil -} - -// PolicyCacheGet returns the contents of a SelectorCache. 
-func (c *Client) PolicyCacheGet() (models.SelectorCache, error) { - params := policy.NewGetPolicySelectorsParams().WithTimeout(api.ClientTimeout) - resp, err := c.Policy.GetPolicySelectors(params) - if err != nil { - return nil, Hint(err) - } - return resp.Payload, nil -} - -// PolicyDelete deletes policy rules -func (c *Client) PolicyDelete(labels []string) (*models.Policy, error) { - params := policy.NewDeletePolicyParams().WithLabels(labels).WithTimeout(api.ClientTimeout) - resp, err := c.Policy.DeletePolicy(params) - if err != nil { - return nil, Hint(err) - } - return resp.Payload, Hint(err) -} diff --git a/vendor/github.com/cilium/cilium/pkg/client/prefilter.go b/vendor/github.com/cilium/cilium/pkg/client/prefilter.go deleted file mode 100644 index 79de98e1b..000000000 --- a/vendor/github.com/cilium/cilium/pkg/client/prefilter.go +++ /dev/null @@ -1,39 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package client - -import ( - "github.com/cilium/cilium/api/v1/client/prefilter" - "github.com/cilium/cilium/api/v1/models" - "github.com/cilium/cilium/pkg/api" -) - -// GetPrefilter returns a list of all CIDR prefixes -func (c *Client) GetPrefilter() (*models.Prefilter, error) { - resp, err := c.Prefilter.GetPrefilter(nil) - if err != nil { - return nil, Hint(err) - } - return resp.Payload, nil -} - -// PatchPrefilter sets a list of CIDR prefixes -func (c *Client) PatchPrefilter(spec *models.PrefilterSpec) (*models.Prefilter, error) { - params := prefilter.NewPatchPrefilterParams().WithPrefilterSpec(spec).WithTimeout(api.ClientTimeout) - resp, err := c.Prefilter.PatchPrefilter(params) - if err != nil { - return nil, Hint(err) - } - return resp.Payload, nil -} - -// DeletePrefilter deletes a list of CIDR prefixes -func (c *Client) DeletePrefilter(spec *models.PrefilterSpec) (*models.Prefilter, error) { - params := prefilter.NewDeletePrefilterParams().WithPrefilterSpec(spec).WithTimeout(api.ClientTimeout) - resp, err := c.Prefilter.DeletePrefilter(params) - if err != nil { - return nil, Hint(err) - } - return resp.Payload, nil -} diff --git a/vendor/github.com/cilium/cilium/pkg/client/recorder.go b/vendor/github.com/cilium/cilium/pkg/client/recorder.go deleted file mode 100644 index 4da24a72b..000000000 --- a/vendor/github.com/cilium/cilium/pkg/client/recorder.go +++ /dev/null @@ -1,47 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package client - -import ( - "github.com/cilium/cilium/api/v1/client/recorder" - "github.com/cilium/cilium/api/v1/models" - "github.com/cilium/cilium/pkg/api" -) - -func (c *Client) GetRecorder() ([]*models.Recorder, error) { - resp, err := c.Recorder.GetRecorder(nil) - if err != nil { - return nil, Hint(err) - } - return resp.Payload, nil -} - -func (c *Client) GetRecorderMasks() ([]*models.RecorderMask, error) { - resp, err := c.Recorder.GetRecorderMasks(nil) - if err != nil { - return nil, Hint(err) - } - return resp.Payload, nil -} - -func (c *Client) GetRecorderID(id int64) (*models.Recorder, error) { - params := recorder.NewGetRecorderIDParams().WithID(id).WithTimeout(api.ClientTimeout) - resp, err := c.Recorder.GetRecorderID(params) - if err != nil { - return nil, Hint(err) - } - return resp.Payload, nil -} - -func (c *Client) PutRecorderID(id int64, rec *models.RecorderSpec) (bool, error) { - params := recorder.NewPutRecorderIDParams().WithID(id).WithConfig(rec).WithTimeout(api.ClientTimeout) - _, created, err := c.Recorder.PutRecorderID(params) - return created != nil, Hint(err) -} - 
-func (c *Client) DeleteRecorderID(id int64) error { - params := recorder.NewDeleteRecorderIDParams().WithID(id).WithTimeout(api.ClientTimeout) - _, err := c.Recorder.DeleteRecorderID(params) - return Hint(err) -} diff --git a/vendor/github.com/cilium/cilium/pkg/client/service.go b/vendor/github.com/cilium/cilium/pkg/client/service.go deleted file mode 100644 index fa53468c7..000000000 --- a/vendor/github.com/cilium/cilium/pkg/client/service.go +++ /dev/null @@ -1,44 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package client - -import ( - "github.com/cilium/cilium/api/v1/client/service" - "github.com/cilium/cilium/api/v1/models" - "github.com/cilium/cilium/pkg/api" -) - -// GetServices returns a list of all services. -func (c *Client) GetServices() ([]*models.Service, error) { - resp, err := c.Service.GetService(nil) - if err != nil { - return nil, Hint(err) - } - return resp.Payload, nil -} - -// GetServiceID returns a service by ID. -func (c *Client) GetServiceID(id int64) (*models.Service, error) { - params := service.NewGetServiceIDParams().WithID(id).WithTimeout(api.ClientTimeout) - resp, err := c.Service.GetServiceID(params) - if err != nil { - return nil, Hint(err) - } - return resp.Payload, nil -} - -// PutServiceID creates or updates a service. Returns true if service was created. -func (c *Client) PutServiceID(id int64, svc *models.ServiceSpec) (bool, error) { - svc.ID = id - params := service.NewPutServiceIDParams().WithID(id).WithConfig(svc).WithTimeout(api.ClientTimeout) - _, created, err := c.Service.PutServiceID(params) - return created != nil, Hint(err) -} - -// DeleteServiceID deletes a service by ID. -func (c *Client) DeleteServiceID(id int64) error { - params := service.NewDeleteServiceIDParams().WithID(id).WithTimeout(api.ClientTimeout) - _, err := c.Service.DeleteServiceID(params) - return Hint(err) -} diff --git a/vendor/github.com/cilium/cilium/pkg/clustermesh/types/types.go b/vendor/github.com/cilium/cilium/pkg/clustermesh/types/types.go index c6eb771b1..234e49eb1 100644 --- a/vendor/github.com/cilium/cilium/pkg/clustermesh/types/types.go +++ b/vendor/github.com/cilium/cilium/pkg/clustermesh/types/types.go @@ -29,6 +29,13 @@ func ValidateClusterID(clusterID uint32) error { type CiliumClusterConfig struct { ID uint32 `json:"id,omitempty"` + + Capabilities CiliumClusterConfigCapabilities `json:"capabilities,omitempty"` +} + +type CiliumClusterConfigCapabilities struct { + // Supports per-prefix "synced" canaries + SyncedCanaries bool `json:"syncedCanaries,omitempty"` } func (c0 *CiliumClusterConfig) IsCompatible(c1 *CiliumClusterConfig) error { @@ -52,3 +59,9 @@ func (c0 *CiliumClusterConfig) IsCompatible(c1 *CiliumClusterConfig) error { } return nil } + +// ClusterIDName groups together the ClusterID and the ClusterName +type ClusterIDName struct { + ClusterID uint32 + ClusterName string +} diff --git a/vendor/github.com/cilium/cilium/pkg/command/map_string.go b/vendor/github.com/cilium/cilium/pkg/command/map_string.go index 904182f5b..c371f91fe 100644 --- a/vendor/github.com/cilium/cilium/pkg/command/map_string.go +++ b/vendor/github.com/cilium/cilium/pkg/command/map_string.go @@ -32,10 +32,17 @@ func GetStringMapString(vp *viper.Viper, key string) map[string]string { // GetStringMapStringE is same as GetStringMapString, but with error func GetStringMapStringE(vp *viper.Viper, key string) (map[string]string, error) { - data := vp.Get(key) + return ToStringMapStringE(vp.Get(key)) +} + +// ToStringMapStringE casts an interface to 
a map[string]string type. The underlying +// interface type might be a map or string. In the latter case, it is attempted to be +// json decoded, falling back to the k1=v2,k2=v2 format in case it doesn't look like json. +func ToStringMapStringE(data interface{}) (map[string]string, error) { if data == nil { return map[string]string{}, nil } + v, err := cast.ToStringMapStringE(data) if err != nil { var syntaxErr *json.SyntaxError diff --git a/vendor/github.com/cilium/cilium/pkg/common/const.go b/vendor/github.com/cilium/cilium/pkg/common/const.go deleted file mode 100644 index 365d9ae28..000000000 --- a/vendor/github.com/cilium/cilium/pkg/common/const.go +++ /dev/null @@ -1,15 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package common - -const ( - // Miscellaneous dedicated constants - - // CHeaderFileName is the name of the C header file for BPF programs for a - // particular endpoint. - CHeaderFileName = "ep_config.h" - - // PossibleCPUSysfsPath is used to retrieve the number of CPUs for per-CPU maps. - PossibleCPUSysfsPath = "/sys/devices/system/cpu/possible" -) diff --git a/vendor/github.com/cilium/cilium/pkg/common/utils.go b/vendor/github.com/cilium/cilium/pkg/common/utils.go deleted file mode 100644 index d75e622a2..000000000 --- a/vendor/github.com/cilium/cilium/pkg/common/utils.go +++ /dev/null @@ -1,137 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package common - -import ( - "fmt" - "io" - "os" - "strconv" - "strings" - - "github.com/sirupsen/logrus" - - "github.com/cilium/cilium/pkg/safeio" -) - -// C2GoArray transforms an hexadecimal string representation into a byte slice. -// Example: -// str := "0x12, 0xff, 0x0, 0x1" -// fmt.Print(C2GoArray(str)) //`{0x12, 0xFF, 0x0, 0x01}`" -func C2GoArray(str string) []byte { - ret := []byte{} - - if str == "" { - return ret - } - - hexStr := strings.Split(str, ", ") - for _, hexDigit := range hexStr { - strDigit := strings.TrimPrefix(hexDigit, "0x") - digitUint64, err := strconv.ParseUint(strDigit, 16, 8) - if err != nil { - return nil - } - ret = append(ret, byte(digitUint64)) - } - return ret -} - -// GoArray2C transforms a byte slice into its hexadecimal string representation. -// Example: -// array := []byte{0x12, 0xFF, 0x0, 0x01} -// fmt.Print(GoArray2C(array)) // "{ 0x12, 0xff, 0x0, 0x1 }" -func GoArray2C(array []byte) string { - return goArray2C(array, true) -} - -// GoArray2CNoSpaces does the same as GoArray2C, but no spaces are used in -// the final output. -// Example: -// array := []byte{0x12, 0xFF, 0x0, 0x01} -// fmt.Print(GoArray2CNoSpaces(array)) // "{0x12,0xff,0x0,0x1}" -func GoArray2CNoSpaces(array []byte) string { - return goArray2C(array, false) -} - -func goArray2C(array []byte, space bool) string { - ret := "" - format := ",%#x" - if space { - format = ", %#x" - } - - for i, e := range array { - if i == 0 { - ret = ret + fmt.Sprintf("%#x", e) - } else { - ret = ret + fmt.Sprintf(format, e) - } - } - return ret -} - -// RequireRootPrivilege checks if the user running cmd is root. 
If not, it exits the program -func RequireRootPrivilege(cmd string) { - if os.Getuid() != 0 { - fmt.Fprintf(os.Stderr, "Please run %q command(s) with root privileges.\n", cmd) - os.Exit(1) - } -} - -// MapStringStructToSlice returns a slice with all keys of the given -// map[string]struct{} -func MapStringStructToSlice(m map[string]struct{}) []string { - s := make([]string, 0, len(m)) - for k := range m { - s = append(s, k) - } - return s -} - -// GetNumPossibleCPUs returns a total number of possible CPUS, i.e. CPUs that -// have been allocated resources and can be brought online if they are present. -// The number is retrieved by parsing /sys/devices/system/cpu/possible. -// -// See https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/include/linux/cpumask.h?h=v4.19#n50 -// for more details. -func GetNumPossibleCPUs(log *logrus.Entry) int { - f, err := os.Open(PossibleCPUSysfsPath) - if err != nil { - log.WithError(err).Errorf("unable to open %q", PossibleCPUSysfsPath) - return 0 - } - defer f.Close() - - return getNumPossibleCPUsFromReader(log, f) -} - -func getNumPossibleCPUsFromReader(log *logrus.Entry, r io.Reader) int { - out, err := safeio.ReadAllLimit(r, safeio.KB) - if err != nil { - log.WithError(err).Errorf("unable to read %q to get CPU count", PossibleCPUSysfsPath) - return 0 - } - - var start, end int - count := 0 - for _, s := range strings.Split(string(out), ",") { - // Go's scanf will return an error if a format cannot be fully matched. - // So, just ignore it, as a partial match (e.g. when there is only one - // CPU) is expected. - n, err := fmt.Sscanf(s, "%d-%d", &start, &end) - - switch n { - case 0: - log.WithError(err).Errorf("failed to scan %q to retrieve number of possible CPUs!", s) - return 0 - case 1: - count++ - default: - count += (end - start + 1) - } - } - - return count -} diff --git a/vendor/github.com/cilium/cilium/pkg/components/components.go b/vendor/github.com/cilium/cilium/pkg/components/components.go deleted file mode 100644 index 38f483a07..000000000 --- a/vendor/github.com/cilium/cilium/pkg/components/components.go +++ /dev/null @@ -1,25 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package components - -import ( - "os" - "strings" -) - -const ( - // CiliumAgentName is the name of cilium-agent (daemon) process name. - CiliumAgentName = "cilium-agent" - // CiliumOperatortName is the name of cilium-operator process name. - CiliumOperatortName = "cilium-operator" - // CiliumDaemonTestName is the name of test binary for daemon package. - CiliumDaemonTestName = "cmd.test" -) - -// IsCiliumAgent checks whether the current process is cilium-agent (daemon). 
-func IsCiliumAgent() bool { - binaryName := os.Args[0] - return strings.HasSuffix(binaryName, CiliumAgentName) || - strings.HasSuffix(binaryName, CiliumDaemonTestName) -} diff --git a/vendor/github.com/cilium/cilium/pkg/defaults/defaults.go b/vendor/github.com/cilium/cilium/pkg/defaults/defaults.go index 70e6334a7..75729cd27 100644 --- a/vendor/github.com/cilium/cilium/pkg/defaults/defaults.go +++ b/vendor/github.com/cilium/cilium/pkg/defaults/defaults.go @@ -103,21 +103,18 @@ const ( // from previous state automatically EnableHostIPRestore = true - // DefaultMapRoot is the default path where BPFFS should be mounted - DefaultMapRoot = "/sys/fs/bpf" + // BPFFSRoot is the default path where BPFFS should be mounted + BPFFSRoot = "/sys/fs/bpf" - // DefaultCgroupRoot is the default path where cilium cgroup2 should be mounted - DefaultCgroupRoot = "/run/cilium/cgroupv2" - - // SockopsEnable controsl whether sockmap should be used - SockopsEnable = false - - // DefaultMapRootFallback is the path which is used when /sys/fs/bpf has + // BPFFSRootFallback is the path which is used when /sys/fs/bpf has // a mount, but with the other filesystem than BPFFS. - DefaultMapRootFallback = "/run/cilium/bpffs" + BPFFSRootFallback = "/run/cilium/bpffs" - // DefaultMapPrefix is the default prefix for all BPF maps. - DefaultMapPrefix = "tc/globals" + // TCGlobalsPath is the default prefix for all BPF maps. + TCGlobalsPath = "tc/globals" + + // DefaultCgroupRoot is the default path where cilium cgroup2 should be mounted + DefaultCgroupRoot = "/run/cilium/cgroupv2" // DNSMaxIPsPerRestoredRule defines the maximum number of IPs to maintain // for each FQDN selector in endpoint's restored DNS rules. @@ -219,6 +216,14 @@ const ( // EnableIPSec is the default value for IPSec enablement EnableIPSec = false + // IPsecKeyRotationDuration is the time to wait before removing old keys when + // the IPsec key is changing. + IPsecKeyRotationDuration = 5 * time.Minute + + // Enable watcher for IPsec key. If disabled, a restart of the agent will + // be necessary on key rotations. + EnableIPsecKeyWatcher = true + // EncryptNode enables encrypting traffic from host networking applications // which are not part of Cilium manged pods. EncryptNode = false @@ -325,11 +330,6 @@ const ( // LoopbackIPv4 is the default address for service loopback LoopbackIPv4 = "169.254.42.1" - // ForceLocalPolicyEvalAtSource is the default value for - // option.ForceLocalPolicyEvalAtSource. It can be enabled to provide - // backwards compatibility. - ForceLocalPolicyEvalAtSource = false - // EnableEndpointRoutes is the value for option.EnableEndpointRoutes. // It is disabled by default for backwards compatibility. EnableEndpointRoutes = false @@ -370,6 +370,10 @@ const ( // CiliumNode.Spec.IPAM.PreAllocate if no value is set IPAMPreAllocation = 8 + // IPAMMultiPoolPreAllocation is the default value for multi-pool IPAM + // pre-allocations + IPAMMultiPoolPreAllocation = "default=8" + // ENIFirstInterfaceIndex is the default value for // CiliumNode.Spec.ENI.FirstInterfaceIndex if no value is set. ENIFirstInterfaceIndex = 0 @@ -460,7 +464,7 @@ const ( CertsDirectory = RuntimePath + "/certs" // EnableRemoteNodeIdentity is the default value for option.EnableRemoteNodeIdentity - EnableRemoteNodeIdentity = false + EnableRemoteNodeIdentity = true // IPAMExpiration is the timeout after which an IP subject to expiratio // is being released again if no endpoint is being created in time. 
@@ -482,6 +486,11 @@ const ( // for local traffic EnableIdentityMark = true + // EnableHighScaleIPcache enables the special ipcache mode for high scale + // clusters. The ipcache content will be reduced to the strict minimum and + // traffic will be encapsulated to carry security identities. + EnableHighScaleIPcache = false + // K8sEnableLeasesFallbackDiscovery enables k8s to fallback to API probing to check // for the support of Leases in Kubernetes when there is an error in discovering // API groups using Discovery API. @@ -506,6 +515,15 @@ const ( // EnableICMPRules enables ICMP-based rule support for Cilium Network Policies. EnableICMPRules = true + // RoutingMode enables choosing between native routing mode or tunneling mode. + RoutingMode = "tunnel" + + // TunnelProtocol is the default tunneling protocol + TunnelProtocol = "vxlan" + + // Use the CiliumInternalIPs (vs. NodeInternalIPs) for IPsec encapsulation. + UseCiliumInternalIPForIPsec = false + // TunnelPortVXLAN is the default VXLAN port TunnelPortVXLAN = 8472 // TunnelPortGeneve is the default Geneve port diff --git a/vendor/github.com/cilium/cilium/pkg/endpoint/id/id.go b/vendor/github.com/cilium/cilium/pkg/endpoint/id/id.go deleted file mode 100644 index ef99cf078..000000000 --- a/vendor/github.com/cilium/cilium/pkg/endpoint/id/id.go +++ /dev/null @@ -1,118 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package id - -import ( - "fmt" - "math" - "net" - "strconv" - "strings" -) - -// MaxEndpointID is the maximum endpoint identifier. -const MaxEndpointID = math.MaxUint16 - -// PrefixType describes the type of endpoint identifier -type PrefixType string - -func (s PrefixType) String() string { return string(s) } - -const ( - // CiliumLocalIdPrefix is a numeric identifier with local scope. It has - // no cluster wide meaning and is only unique in the scope of a single - // agent. An endpoint is guaranteed to always have a local scope identifier. - CiliumLocalIdPrefix PrefixType = "cilium-local" - - // CiliumGlobalIdPrefix is an endpoint identifier with global scope. - // This addressing mechanism is currently unused. - CiliumGlobalIdPrefix PrefixType = "cilium-global" - - // ContainerIdPrefix is used to address an endpoint via its primary - // container ID. The container ID is specific to the container runtime - // in use. Only the primary container that defines the networking scope - // can be used to address an endpoint. - ContainerIdPrefix PrefixType = "container-id" - - // DockerEndpointPrefix is used to address an endpoint via the Docker - // endpoint ID. This method is only possible if the endpoint was - // created via the cilium-docker plugin and the container is backed by - // the libnetwork abstraction. - DockerEndpointPrefix PrefixType = "docker-endpoint" - - // ContainerNamePrefix is used to address the endpoint via the - // container's name. This addressing mechanism depends on the container - // runtime. Only the primary container that the networking scope can be - // used to address an endpoint. - ContainerNamePrefix PrefixType = "container-name" - - // PodNamePrefix is used to address an endpoint via the Kubernetes pod - // name. This addressing only works if the endpoint represents as - // Kubernetes pod. - PodNamePrefix PrefixType = "pod-name" - - // IPv4Prefix is used to address an endpoint via the endpoint's IPv4 - // address. 
- IPv4Prefix PrefixType = "ipv4" - - // IPv6Prefix is the prefix used to refer to an endpoint via IPv6 address - IPv6Prefix PrefixType = "ipv6" -) - -// NewCiliumID returns a new endpoint identifier of type CiliumLocalIdPrefix -func NewCiliumID(id int64) string { - return fmt.Sprintf("%s:%d", CiliumLocalIdPrefix, id) -} - -// NewID returns a new endpoint identifier -func NewID(prefix PrefixType, id string) string { - return string(prefix) + ":" + id -} - -// NewIPPrefixID returns an identifier based on the IP address specified -func NewIPPrefixID(ip net.IP) string { - if ip.To4() != nil { - return NewID(IPv4Prefix, ip.String()) - } - - return NewID(IPv6Prefix, ip.String()) -} - -// splitID splits ID into prefix and id. No validation is performed on prefix. -func splitID(id string) (PrefixType, string) { - if idx := strings.Index(id, ":"); idx > -1 { - return PrefixType(id[:idx]), id[idx+1:] - } - - // default prefix - return CiliumLocalIdPrefix, id -} - -// ParseCiliumID parses id as cilium endpoint id and returns numeric portion. -func ParseCiliumID(id string) (int64, error) { - prefix, id := splitID(id) - if prefix != CiliumLocalIdPrefix { - return 0, fmt.Errorf("not a cilium identifier") - } - n, err := strconv.ParseInt(id, 0, 64) - if err != nil || n < 0 { - return 0, fmt.Errorf("invalid numeric cilium id: %s", err) - } - if n > MaxEndpointID { - return 0, fmt.Errorf("endpoint id too large: %d", n) - } - return n, nil -} - -// Parse parses a string as an endpoint identified consists of an optional -// prefix [prefix:] followed by the identifier. -func Parse(id string) (PrefixType, string, error) { - prefix, id := splitID(id) - switch prefix { - case CiliumLocalIdPrefix, CiliumGlobalIdPrefix, ContainerIdPrefix, DockerEndpointPrefix, ContainerNamePrefix, PodNamePrefix, IPv4Prefix, IPv6Prefix: - return prefix, id, nil - } - - return "", "", fmt.Errorf("unknown endpoint ID prefix \"%s\"", prefix) -} diff --git a/vendor/github.com/cilium/cilium/pkg/endpoint/id/identifiers.go b/vendor/github.com/cilium/cilium/pkg/endpoint/id/identifiers.go deleted file mode 100644 index d2deaa391..000000000 --- a/vendor/github.com/cilium/cilium/pkg/endpoint/id/identifiers.go +++ /dev/null @@ -1,8 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package id - -// Identifiers is a collection of attributes that identify the Endpoint through -// different systems. For examples of the type of Identifiers, see PrefixType. 
-type Identifiers map[PrefixType]string diff --git a/vendor/github.com/cilium/cilium/pkg/health/client/client.go b/vendor/github.com/cilium/cilium/pkg/health/client/client.go deleted file mode 100644 index 6d603973b..000000000 --- a/vendor/github.com/cilium/cilium/pkg/health/client/client.go +++ /dev/null @@ -1,411 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package client - -import ( - "fmt" - "io" - "net" - "net/http" - "net/url" - "os" - "sort" - "strings" - "time" - - runtime_client "github.com/go-openapi/runtime/client" - "github.com/go-openapi/strfmt" - - clientapi "github.com/cilium/cilium/api/v1/health/client" - "github.com/cilium/cilium/api/v1/health/models" - "github.com/cilium/cilium/pkg/health/defaults" -) - -type ConnectivityStatusType int - -const ( - ipUnavailable = "Unavailable" - - ConnStatusReachable ConnectivityStatusType = 0 - ConnStatusUnreachable ConnectivityStatusType = 1 - ConnStatusUnknown ConnectivityStatusType = 2 -) - -func (c ConnectivityStatusType) String() string { - switch c { - case ConnStatusReachable: - return "reachable" - case ConnStatusUnreachable: - return "unreachable" - default: - return "unknown" - } -} - -// Client is a client for cilium health -type Client struct { - clientapi.CiliumHealthAPI -} - -func configureTransport(tr *http.Transport, proto, addr string) *http.Transport { - if tr == nil { - tr = &http.Transport{} - } - - if proto == "unix" { - // No need for compression in local communications. - tr.DisableCompression = true - tr.Dial = func(_, _ string) (net.Conn, error) { - return net.Dial(proto, addr) - } - } else { - tr.Proxy = http.ProxyFromEnvironment - tr.Dial = (&net.Dialer{}).Dial - } - - return tr -} - -// NewDefaultClient creates a client with default parameters connecting to UNIX domain socket. -func NewDefaultClient() (*Client, error) { - return NewClient("") -} - -// NewClient creates a client for the given `host`. -func NewClient(host string) (*Client, error) { - if host == "" { - // Check if environment variable points to socket - e := os.Getenv(defaults.SockPathEnv) - if e == "" { - // If unset, fall back to default value - e = defaults.SockPath - } - host = "unix://" + e - } - tmp := strings.SplitN(host, "://", 2) - if len(tmp) != 2 { - return nil, fmt.Errorf("invalid host format '%s'", host) - } - - switch tmp[0] { - case "tcp": - if _, err := url.Parse("tcp://" + tmp[1]); err != nil { - return nil, err - } - host = "http://" + tmp[1] - case "unix": - host = tmp[1] - } - - transport := configureTransport(nil, tmp[0], host) - httpClient := &http.Client{Transport: transport} - clientTrans := runtime_client.NewWithClient(tmp[1], clientapi.DefaultBasePath, - clientapi.DefaultSchemes, httpClient) - return &Client{*clientapi.New(clientTrans, strfmt.Default)}, nil -} - -// Hint tries to improve the error message displayed to the user. -func Hint(err error) error { - if err == nil { - return err - } - e, _ := url.PathUnescape(err.Error()) - if strings.Contains(err.Error(), defaults.SockPath) { - return fmt.Errorf("%s\nIs the agent running?", e) - } - return fmt.Errorf("%s", e) -} - -func GetConnectivityStatusType(cs *models.ConnectivityStatus) ConnectivityStatusType { - // If the connecticity status is nil, it means that there was no - // successful probe, but also no failed probe with a concrete reason. In - // that case, the status is unknown and it usually means that the new - // is still in the beginning of the bootstraping process. 
- if cs == nil { - return ConnStatusUnknown - } - // Empty status means successful probe. - if cs.Status == "" { - return ConnStatusReachable - } - // Non-empty status means that there was an explicit reason of failure. - return ConnStatusUnreachable -} - -func GetPathConnectivityStatusType(cp *models.PathStatus) ConnectivityStatusType { - if cp == nil { - return ConnStatusUnreachable - } - statuses := []*models.ConnectivityStatus{ - cp.Icmp, - cp.HTTP, - } - // Initially assume healthy status. - status := ConnStatusReachable - for _, cs := range statuses { - switch GetConnectivityStatusType(cs) { - case ConnStatusUnreachable: - // If any status is unreachable, return it immediately. - return ConnStatusUnreachable - case ConnStatusUnknown: - // If the status is unknown, prepare to return it. It's - // going to be returned if there is no unreachable - // status in next iterations. - status = ConnStatusUnknown - } - } - return status -} - -func SummarizePathConnectivityStatusType(cps []*models.PathStatus) ConnectivityStatusType { - status := ConnStatusReachable - for _, cp := range cps { - switch GetPathConnectivityStatusType(cp) { - case ConnStatusUnreachable: - // If any status is unreachable, return it immediately. - return ConnStatusUnreachable - case ConnStatusUnknown: - // If the status is unknown, prepare to return it. It's - // going to be returned if there is no unreachable - // status in next iterations. - status = ConnStatusUnknown - } - } - return status -} - -func formatConnectivityStatus(w io.Writer, cs *models.ConnectivityStatus, path, indent string) { - status := cs.Status - switch GetConnectivityStatusType(cs) { - case ConnStatusReachable: - latency := time.Duration(cs.Latency) - status = fmt.Sprintf("OK, RTT=%s", latency) - } - fmt.Fprintf(w, "%s%s:\t%s\n", indent, path, status) -} - -func formatPathStatus(w io.Writer, name string, cp *models.PathStatus, indent string, verbose bool) { - if cp == nil { - if verbose { - fmt.Fprintf(w, "%s%s connectivity:\tnil\n", indent, name) - } - return - } - fmt.Fprintf(w, "%s%s connectivity to %s:\n", indent, name, cp.IP) - indent = fmt.Sprintf("%s ", indent) - - if cp.Icmp != nil { - formatConnectivityStatus(w, cp.Icmp, "ICMP to stack", indent) - } - if cp.HTTP != nil { - formatConnectivityStatus(w, cp.HTTP, "HTTP to agent", indent) - } -} - -// allPathsAreHealthyOrUnknown checks whether ICMP and TCP(HTTP) connectivity -// to the given paths is available or had no explicit error status -// (which usually is the case when the new node is provisioned). -func allPathsAreHealthyOrUnknown(cps []*models.PathStatus) bool { - for _, cp := range cps { - if cp == nil { - return false - } - - statuses := []*models.ConnectivityStatus{ - cp.Icmp, - cp.HTTP, - } - for _, status := range statuses { - switch GetConnectivityStatusType(status) { - case ConnStatusUnreachable: - return false - } - } - } - return true -} - -func nodeIsHealthy(node *models.NodeStatus) bool { - return allPathsAreHealthyOrUnknown(GetAllHostAddresses(node)) && - allPathsAreHealthyOrUnknown(GetAllEndpointAddresses(node)) -} - -func nodeIsLocalhost(node *models.NodeStatus, self *models.SelfStatus) bool { - return self != nil && node.Name == self.Name -} - -func getPrimaryAddressIP(node *models.NodeStatus) string { - if node.Host == nil || node.Host.PrimaryAddress == nil { - return ipUnavailable - } - - return node.Host.PrimaryAddress.IP -} - -// GetHostPrimaryAddress returns the PrimaryAddress for the Host within node. -// If node.Host is nil, returns nil. 
-func GetHostPrimaryAddress(node *models.NodeStatus) *models.PathStatus { - if node.Host == nil { - return nil - } - - return node.Host.PrimaryAddress -} - -// GetHostSecondaryAddresses returns the secondary host addresses (if any) -func GetHostSecondaryAddresses(node *models.NodeStatus) []*models.PathStatus { - if node.Host == nil { - return nil - } - - return node.Host.SecondaryAddresses -} - -// GetAllHostAddresses returns a list of all addresses (primary and any -// and any secondary) for the host of a given node. If node.Host is nil, -// returns nil. -func GetAllHostAddresses(node *models.NodeStatus) []*models.PathStatus { - if node.Host == nil { - return nil - } - - return append([]*models.PathStatus{node.Host.PrimaryAddress}, node.Host.SecondaryAddresses...) -} - -// GetEndpointPrimaryAddress returns the PrimaryAddress for the health endpoint -// within node. If node.HealthEndpoint is nil, returns nil. -func GetEndpointPrimaryAddress(node *models.NodeStatus) *models.PathStatus { - if node.HealthEndpoint == nil { - return nil - } - - return node.HealthEndpoint.PrimaryAddress -} - -// GetEndpointSecondaryAddresses returns the secondary health endpoint addresses -// (if any) -func GetEndpointSecondaryAddresses(node *models.NodeStatus) []*models.PathStatus { - if node.HealthEndpoint == nil { - return nil - } - - return node.HealthEndpoint.SecondaryAddresses -} - -// GetAllEndpointAddresses returns a list of all addresses (primary and any -// secondary) for the health endpoint within a given node. -// If node.HealthEndpoint is nil, returns nil. -func GetAllEndpointAddresses(node *models.NodeStatus) []*models.PathStatus { - if node.HealthEndpoint == nil { - return nil - } - - return append([]*models.PathStatus{node.HealthEndpoint.PrimaryAddress}, node.HealthEndpoint.SecondaryAddresses...) -} - -func formatNodeStatus(w io.Writer, node *models.NodeStatus, printAll, succinct, verbose, localhost bool) { - localStr := "" - if localhost { - localStr = " (localhost)" - } - if succinct { - if printAll || !nodeIsHealthy(node) { - fmt.Fprintf(w, " %s%s\t%s\t%s\t%s\n", node.Name, - localStr, getPrimaryAddressIP(node), - SummarizePathConnectivityStatusType(GetAllHostAddresses(node)).String(), - SummarizePathConnectivityStatusType(GetAllEndpointAddresses(node)).String()) - } - } else { - fmt.Fprintf(w, " %s%s:\n", node.Name, localStr) - formatPathStatus(w, "Host", GetHostPrimaryAddress(node), " ", verbose) - unhealthyPaths := !allPathsAreHealthyOrUnknown(GetHostSecondaryAddresses(node)) - if (verbose || unhealthyPaths) && node.Host != nil { - for _, addr := range node.Host.SecondaryAddresses { - formatPathStatus(w, "Secondary", addr, " ", verbose) - } - } - formatPathStatus(w, "Endpoint", GetEndpointPrimaryAddress(node), " ", verbose) - unhealthyPaths = !allPathsAreHealthyOrUnknown(GetEndpointSecondaryAddresses(node)) - if (verbose || unhealthyPaths) && node.HealthEndpoint != nil { - for _, addr := range node.HealthEndpoint.SecondaryAddresses { - formatPathStatus(w, "Secondary", addr, " ", verbose) - } - } - } -} - -// FormatHealthStatusResponse writes a HealthStatusResponse as a string to the -// writer. 
-// -// 'printAll', if true, causes all nodes to be printed regardless of status -// 'succinct', if true, causes node health to be output as one line per node -// 'verbose', if true, overrides 'succinct' and prints all information -// 'maxLines', if nonzero, determines the maximum number of lines to print -func FormatHealthStatusResponse(w io.Writer, sr *models.HealthStatusResponse, printAll, succinct, verbose bool, maxLines int) { - var ( - healthy int - localhost *models.NodeStatus - ) - for _, node := range sr.Nodes { - if nodeIsHealthy(node) { - healthy++ - } - if nodeIsLocalhost(node, sr.Local) { - localhost = node - } - } - if succinct { - fmt.Fprintf(w, "Cluster health:\t%d/%d reachable\t(%s)\n", - healthy, len(sr.Nodes), sr.Timestamp) - if printAll || healthy < len(sr.Nodes) { - fmt.Fprintf(w, " Name\tIP\tNode\tEndpoints\n") - } - } else { - fmt.Fprintf(w, "Probe time:\t%s\n", sr.Timestamp) - fmt.Fprintf(w, "Nodes:\n") - } - - if localhost != nil { - formatNodeStatus(w, localhost, printAll, succinct, verbose, true) - maxLines-- - } - - nodes := sr.Nodes - sort.Slice(nodes, func(i, j int) bool { - return strings.Compare(nodes[i].Name, nodes[j].Name) < 0 - }) - for n, node := range nodes { - if maxLines > 0 && n > maxLines { - break - } - if node == localhost { - continue - } - formatNodeStatus(w, node, printAll, succinct, verbose, false) - } - if maxLines > 0 && len(sr.Nodes)-healthy > maxLines { - fmt.Fprintf(w, " ...") - } -} - -// GetAndFormatHealthStatus fetches the health status from the cilium-health -// daemon via the default channel and formats its output as a string to the -// writer. -// -// 'succinct', 'verbose' and 'maxLines' are handled the same as in -// FormatHealthStatusResponse(). -func GetAndFormatHealthStatus(w io.Writer, succinct, verbose bool, maxLines int) { - client, err := NewClient("") - if err != nil { - fmt.Fprintf(w, "Cluster health:\t\t\tClient error: %s\n", err) - return - } - hr, err := client.Connectivity.GetStatus(nil) - if err != nil { - // The regular `cilium status` output will print the reason why. 
- fmt.Fprintf(w, "Cluster health:\t\t\tWarning\tcilium-health daemon unreachable\n") - return - } - FormatHealthStatusResponse(w, hr.Payload, verbose, succinct, verbose, maxLines) -} diff --git a/vendor/github.com/cilium/cilium/pkg/health/defaults/defaults.go b/vendor/github.com/cilium/cilium/pkg/health/defaults/defaults.go deleted file mode 100644 index d84cecd2c..000000000 --- a/vendor/github.com/cilium/cilium/pkg/health/defaults/defaults.go +++ /dev/null @@ -1,23 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package defaults - -import ( - daemon "github.com/cilium/cilium/pkg/defaults" -) - -const ( - // SockPath is the path to the UNIX domain socket exposing the API to clients locally - SockPath = daemon.RuntimePath + "/health.sock" - - // SockPathEnv is the environment variable to overwrite SockPath - SockPathEnv = "CILIUM_HEALTH_SOCK" - - // HTTPPathPort is used for probing base HTTP path connectivity - HTTPPathPort = daemon.ClusterHealthPort - - // HealthEPName is the name used for the health endpoint, which is also - // used by the CLI client to detect when connectivity health is enabled - HealthEPName = "cilium-health-ep" -) diff --git a/vendor/github.com/cilium/cilium/pkg/identity/identity.go b/vendor/github.com/cilium/cilium/pkg/identity/identity.go index ac4dc8663..bd00be515 100644 --- a/vendor/github.com/cilium/cilium/pkg/identity/identity.go +++ b/vendor/github.com/cilium/cilium/pkg/identity/identity.go @@ -4,6 +4,7 @@ package identity import ( + "encoding/json" "net" "strconv" @@ -64,6 +65,23 @@ type IPIdentityPair struct { NamedPorts []NamedPort `json:"NamedPorts,omitempty"` } +// GetKeyName returns the kvstore key to be used for the IPIdentityPair +func (pair *IPIdentityPair) GetKeyName() string { return pair.PrefixString() } + +// Marshal returns the IPIdentityPair object as JSON byte slice +func (pair *IPIdentityPair) Marshal() ([]byte, error) { return json.Marshal(pair) } + +// Unmarshal parses the JSON byte slice and updates the IPIdentityPair receiver +func (pair *IPIdentityPair) Unmarshal(_ string, data []byte) error { + newPair := IPIdentityPair{} + if err := json.Unmarshal(data, &newPair); err != nil { + return err + } + + *pair = newPair + return nil +} + // NamedPort is a mapping from a port name to a port number and protocol. // // WARNING - STABLE API @@ -113,6 +131,12 @@ func (id *Identity) IsWellKnown() bool { return WellKnown.lookupByNumericIdentity(id.ID) != nil } +// IsWellKnownIdentity returns true if the identity represents a well-known +// identity, false otherwise. +func IsWellKnownIdentity(id NumericIdentity) bool { + return WellKnown.lookupByNumericIdentity(id) != nil +} + // NewIdentityFromLabelArray creates a new identity func NewIdentityFromLabelArray(id NumericIdentity, lblArray labels.LabelArray) *Identity { var lbls labels.Labels diff --git a/vendor/github.com/cilium/cilium/pkg/ip/cidr.go b/vendor/github.com/cilium/cilium/pkg/ip/cidr.go index 653358120..cf9612e55 100644 --- a/vendor/github.com/cilium/cilium/pkg/ip/cidr.go +++ b/vendor/github.com/cilium/cilium/pkg/ip/cidr.go @@ -57,18 +57,19 @@ func ParsePrefixes(cidrs []string) (valid []netip.Prefix, invalid []string, erro return valid, invalid, errors } -// PrefixToIPNet is a convenience helper for migrating from the older 'net' +// IPNetToPrefix is a convenience helper for migrating from the older 'net' // standard library types to the newer 'netip' types. Use this to plug the // new types in newer code into older types in older code during the migration. 
-func PrefixToIPNet(prefix netip.Prefix) *net.IPNet { - if !prefix.IsValid() { - return nil +func IPNetToPrefix(prefix *net.IPNet) netip.Prefix { + if prefix == nil { + return netip.Prefix{} } - addr := prefix.Masked().Addr() - return &net.IPNet{ - IP: addr.AsSlice(), - Mask: net.CIDRMask(prefix.Bits(), addr.BitLen()), + addr, ok := AddrFromIP(prefix.IP) + if !ok { + return netip.Prefix{} } + ones, _ := prefix.Mask.Size() + return netip.PrefixFrom(addr, ones) } // AddrToIPNet is a convenience helper to convert a netip.Addr to a *net.IPNet diff --git a/vendor/github.com/cilium/cilium/pkg/ip/ip.go b/vendor/github.com/cilium/cilium/pkg/ip/ip.go index dc7af5729..14ef09eb1 100644 --- a/vendor/github.com/cilium/cilium/pkg/ip/ip.go +++ b/vendor/github.com/cilium/cilium/pkg/ip/ip.go @@ -11,7 +11,7 @@ import ( "net/netip" "sort" - "golang.org/x/exp/slices" + "github.com/cilium/cilium/pkg/slices" ) const ( @@ -743,27 +743,16 @@ func PartitionCIDR(targetCIDR net.IPNet, excludeCIDR net.IPNet) ([]*net.IPNet, [ // lexicographically sorted via a byte-wise comparison of the IP slices (i.e. // IPv4 addresses show up before IPv6). // The slice is manipulated in-place destructively. -// -// 1- Sort the slice by comparing the IPs as bytes -// 2- For every unseen unique IP in the sorted slice, move it to the end of -// the return slice. -// Note that the slice is always large enough and, because it is sorted, we -// will not overwrite a valid element with another. To overwrite an element i -// with j, i must have come before j AND we decided it was a duplicate of the -// element at i-1. func KeepUniqueIPs(ips []net.IP) []net.IP { - sort.Slice(ips, func(i, j int) bool { - return bytes.Compare(ips[i], ips[j]) == -1 - }) - - returnIPs := ips[:0] // len==0 but cap==cap(ips) - for readIdx, ip := range ips { - if len(returnIPs) == 0 || !returnIPs[len(returnIPs)-1].Equal(ips[readIdx]) { - returnIPs = append(returnIPs, ip) - } - } - - return returnIPs + return slices.SortedUniqueFunc( + ips, + func(i, j int) bool { + return bytes.Compare(ips[i], ips[j]) == -1 + }, + func(a, b net.IP) bool { + return a.Equal(b) + }, + ) } // KeepUniqueAddrs transforms the provided multiset of IP addresses into a @@ -771,13 +760,15 @@ func KeepUniqueIPs(ips []net.IP) []net.IP { // netip.Addr.Compare (i.e. IPv4 addresses show up before IPv6). // The slice is manipulated in-place destructively; it does not create a new slice. 
func KeepUniqueAddrs(addrs []netip.Addr) []netip.Addr { - if len(addrs) == 0 { - return addrs - } - sort.Slice(addrs, func(i, j int) bool { - return addrs[i].Compare(addrs[j]) < 0 - }) - return slices.Compact(addrs) + return slices.SortedUniqueFunc( + addrs, + func(i, j int) bool { + return addrs[i].Compare(addrs[j]) < 0 + }, + func(a, b netip.Addr) bool { + return a == b + }, + ) } var privateIPBlocks []*net.IPNet diff --git a/vendor/github.com/cilium/cilium/pkg/ipam/option/option.go b/vendor/github.com/cilium/cilium/pkg/ipam/option/option.go index 121482ab9..2d82d82bc 100644 --- a/vendor/github.com/cilium/cilium/pkg/ipam/option/option.go +++ b/vendor/github.com/cilium/cilium/pkg/ipam/option/option.go @@ -26,6 +26,9 @@ const ( // IPAMClusterPoolV2 is the value to select cluster pool version 2 IPAMClusterPoolV2 = "cluster-pool-v2beta" + // IPAMMultiPool is the value to select the multi pool IPAM mode + IPAMMultiPool = "multi-pool" + // IPAMAlibabaCloud is the value to select the AlibabaCloud ENI IPAM plugin for option.IPAM IPAMAlibabaCloud = "alibabacloud" diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/register.go b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/register.go index ebe01a137..cfc12d39a 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/register.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/apis/cilium.io/register.go @@ -6,4 +6,14 @@ package ciliumio const ( // CustomResourceDefinitionGroup is the name of the third party resource group CustomResourceDefinitionGroup = "cilium.io" + + // CustomResourceDefinitionSchemaVersionKey is key to label which holds the CRD schema version + CustomResourceDefinitionSchemaVersionKey = "io.cilium.k8s.crd.schema.version" + + // CustomResourceDefinitionSchemaVersion is semver-conformant version of CRD schema + // Used to determine if CRD needs to be updated in cluster + // + // Maintainers: Run ./Documentation/check-crd-compat-table.sh for each release + // Developers: Bump patch for each change in the CRD schema. 
+ CustomResourceDefinitionSchemaVersion = "1.26.9" ) diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/generated.pb.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/generated.pb.go index 4a3b3bf98..5dbe47684 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/generated.pb.go +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/generated.pb.go @@ -1175,171 +1175,172 @@ func init() { } var fileDescriptor_871504499faea14d = []byte{ - // 2612 bytes of a gzipped FileDescriptorProto + // 2631 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x5a, 0x4d, 0x6c, 0x23, 0x49, - 0xf5, 0x9f, 0xb6, 0xe3, 0xc4, 0x7e, 0x4e, 0x9c, 0x4d, 0xcd, 0x64, 0xd5, 0x93, 0xff, 0x6e, 0x92, - 0x7f, 0x0f, 0x42, 0xc3, 0x97, 0xa3, 0x19, 0x08, 0x0c, 0x33, 0xbb, 0xc3, 0xc4, 0x4e, 0x66, 0xc7, - 0xbb, 0xc9, 0xac, 0x29, 0x47, 0x0b, 0x82, 0x03, 0xdb, 0xe9, 0xae, 0xd8, 0xbd, 0xb1, 0xbb, 0x4d, - 0x57, 0x79, 0x66, 0x2d, 0x21, 0x58, 0x84, 0x40, 0x0b, 0xac, 0x04, 0x47, 0x2e, 0x5c, 0xb8, 0x71, - 0x86, 0x0b, 0xe2, 0xbc, 0x62, 0xb8, 0x2d, 0xb7, 0x11, 0x42, 0x61, 0x27, 0x48, 0xdc, 0x38, 0x20, - 0x6e, 0x41, 0x42, 0xa8, 0xaa, 0xab, 0xaa, 0xbb, 0x1d, 0x7b, 0x26, 0x63, 0x47, 0x8a, 0x38, 0xb9, - 0xfb, 0xbd, 0x57, 0xef, 0x57, 0xaf, 0xea, 0xd5, 0xfb, 0x28, 0x37, 0x6c, 0x35, 0x3d, 0xd6, 0xea, - 0xed, 0x95, 0x9d, 0xa0, 0xb3, 0xe6, 0x78, 0x6d, 0xaf, 0xa7, 0x7f, 0xba, 0x07, 0xcd, 0xb5, 0x83, - 0x1b, 0x74, 0x8d, 0xb6, 0xbd, 0x8e, 0x78, 0xb0, 0xbb, 0xde, 0x9a, 0x13, 0x84, 0x64, 0xed, 0xc1, - 0xb5, 0xb5, 0x26, 0xf1, 0x49, 0x68, 0x33, 0xe2, 0x96, 0xbb, 0x61, 0xc0, 0x02, 0xb4, 0x1e, 0xab, - 0x29, 0x47, 0xe3, 0xd5, 0x4f, 0xf7, 0xa0, 0x59, 0x3e, 0xb8, 0x41, 0xcb, 0x5c, 0x8d, 0x78, 0xb0, - 0xbb, 0x5e, 0x99, 0xab, 0x29, 0x3f, 0xb8, 0xb6, 0x74, 0xf7, 0xb9, 0xd0, 0xe9, 0x5a, 0x87, 0x30, - 0x7b, 0x08, 0xfc, 0xd2, 0xe7, 0x12, 0x7a, 0x9a, 0x41, 0x33, 0x58, 0x13, 0xe4, 0xbd, 0xde, 0xbe, - 0x78, 0x13, 0x2f, 0xe2, 0x49, 0x8a, 0x73, 0x85, 0x65, 0x2f, 0xe0, 0x3a, 0x3b, 0xb6, 0xd3, 0xf2, - 0x7c, 0x12, 0xf6, 0x05, 0x62, 0xd8, 0xf3, 0x99, 0xd7, 0x21, 0x27, 0xf4, 0x7f, 0xf1, 0x59, 0x03, - 0xa8, 0xd3, 0x22, 0x1d, 0x7b, 0x70, 0x9c, 0xb5, 0x0d, 0xa5, 0x6a, 0xdb, 0x23, 0x3e, 0xab, 0xd5, - 0xab, 0x81, 0xbf, 0xef, 0x35, 0xd1, 0x4d, 0x28, 0xf1, 0x01, 0x41, 0x8f, 0x35, 0x88, 0x13, 0xf8, - 0x2e, 0x35, 0x8d, 0x55, 0xe3, 0x6a, 0xae, 0x82, 0x8e, 0x0e, 0x57, 0x4a, 0xbb, 0x29, 0x0e, 0x1e, - 0x90, 0xb4, 0x7e, 0x9f, 0x81, 0x42, 0x35, 0xf0, 0x99, 0xcd, 0xf1, 0xd1, 0x2a, 0x4c, 0xf9, 0x76, - 0x87, 0x88, 0xf1, 0x85, 0xca, 0xec, 0xa3, 0xc3, 0x95, 0x0b, 0x47, 0x87, 0x2b, 0x53, 0xf7, 0xed, - 0x0e, 0xc1, 0x82, 0x83, 0xae, 0x40, 0xce, 0xeb, 0xd8, 0x4d, 0x62, 0x66, 0x84, 0xc8, 0x9c, 0x14, - 0xc9, 0xd5, 0x38, 0x11, 0x47, 0x3c, 0xe4, 0x41, 0xae, 0x1b, 0x84, 0x8c, 0x9a, 0xd3, 0xab, 0xd9, - 0xab, 0xc5, 0xeb, 0x9b, 0xe5, 0xb1, 0x76, 0xb2, 0xac, 0xe7, 0x55, 0x0f, 0x42, 0x16, 0x43, 0xf1, - 0x37, 0x8a, 0x23, 0x04, 0xf4, 0x1d, 0x98, 0x7d, 0x10, 0xb4, 0x7b, 0x1d, 0xb2, 0x13, 0xf4, 0x7c, - 0x46, 0xcd, 0x82, 0x40, 0xac, 0x8c, 0x89, 0xf8, 0x56, 0xac, 0xaa, 0x72, 0x49, 0xe2, 0xcd, 0x26, - 0x88, 0x14, 0xa7, 0xd0, 0xac, 0xff, 0x18, 0x30, 0x97, 0x9a, 0xe5, 0x29, 0x56, 0xf0, 0xb3, 0x90, - 0x6f, 0x05, 0x94, 0x71, 0x69, 0xb1, 0x88, 0xb9, 0xca, 0x0b, 0x52, 0x2a, 0x7f, 0x4f, 0xd2, 0xb1, - 0x96, 0x40, 0xb7, 0x60, 0xce, 0x49, 0x02, 0x98, 0x59, 0x31, 0x64, 0x51, 0x0e, 0x49, 0xa3, 0xe3, - 0xb4, 0x2c, 0xba, 0x01, 0x79, 0xe1, 0x33, 0x4e, 0xd0, 0x36, 0xa7, 0xc4, 0x84, 0x5e, 0x52, 0x50, - 0x75, 
0x49, 0x3f, 0x4e, 0x3c, 0x63, 0x2d, 0x8d, 0x3e, 0x09, 0xd3, 0x7c, 0x0a, 0xb5, 0xba, 0x99, - 0x13, 0xe3, 0x4a, 0x72, 0xdc, 0xf4, 0x3d, 0x41, 0xc5, 0x92, 0x6b, 0xfd, 0xd0, 0x80, 0x92, 0x9e, - 0x42, 0x83, 0xd9, 0x8c, 0x20, 0x0a, 0x33, 0x61, 0xcf, 0xf7, 0x3d, 0xbf, 0x29, 0xcc, 0x2b, 0x5e, - 0xdf, 0x9e, 0x74, 0xfb, 0x85, 0x5e, 0x1c, 0xe9, 0xac, 0x14, 0x8f, 0x0e, 0x57, 0x66, 0xe4, 0x0b, - 0x56, 0x48, 0xd6, 0x8f, 0x0c, 0x58, 0x1c, 0x2a, 0x8f, 0x3a, 0x50, 0xa0, 0xcc, 0x0e, 0x19, 0x71, - 0x37, 0x98, 0xd8, 0x95, 0xe2, 0xf5, 0x57, 0x9e, 0x6f, 0x42, 0xb4, 0xcc, 0x43, 0x04, 0x9f, 0x11, - 0x3f, 0x4b, 0x95, 0x05, 0xb9, 0x14, 0x85, 0x86, 0x52, 0x8b, 0x63, 0x04, 0xeb, 0xb7, 0x06, 0xcc, - 0xa7, 0x26, 0xd2, 0xa3, 0xe8, 0x1d, 0xc8, 0x51, 0x3e, 0x25, 0xb9, 0x1e, 0x5b, 0x67, 0xb2, 0x1e, - 0xf1, 0x79, 0x88, 0xcc, 0x8d, 0x20, 0xd0, 0x3a, 0x14, 0xb5, 0x0f, 0xd4, 0x36, 0xcd, 0xbc, 0xd8, - 0xbd, 0x8b, 0x52, 0xb4, 0x58, 0x8d, 0x59, 0x38, 0x29, 0x67, 0x7d, 0x0d, 0xe6, 0xb7, 0x7c, 0xb7, - 0x1b, 0x78, 0x3e, 0xdb, 0x70, 0xdd, 0x90, 0x50, 0x8a, 0x96, 0x20, 0xe3, 0x75, 0xa5, 0x1f, 0x83, - 0x54, 0x90, 0xa9, 0xd5, 0x71, 0xc6, 0xeb, 0xa2, 0xab, 0x90, 0xf7, 0x03, 0x97, 0x70, 0xaf, 0x96, - 0x8e, 0x35, 0xcb, 0x9d, 0xea, 0xbe, 0xa4, 0x61, 0xcd, 0xb5, 0x3e, 0x30, 0x60, 0x56, 0x69, 0x3e, - 0xe5, 0x01, 0x59, 0x85, 0xa9, 0x6e, 0x7c, 0x38, 0xb4, 0x84, 0x70, 0x70, 0xc1, 0x49, 0xf9, 0x75, - 0xf6, 0x79, 0xfc, 0xda, 0xfa, 0x97, 0x01, 0x25, 0x35, 0x9d, 0x46, 0x6f, 0x8f, 0x12, 0x86, 0x1e, - 0x42, 0xc1, 0x8e, 0x4c, 0x26, 0x3c, 0x70, 0xf2, 0xf0, 0x71, 0x77, 0xcc, 0x1d, 0x1a, 0x58, 0xc2, - 0xd8, 0x55, 0x36, 0x14, 0x00, 0x8e, 0xb1, 0x50, 0x4b, 0x45, 0xc9, 0xac, 0x00, 0xad, 0x4e, 0x08, - 0x3a, 0x3a, 0x48, 0x5a, 0xff, 0x34, 0xa0, 0xa0, 0xc4, 0x28, 0x0a, 0x21, 0xcf, 0x1d, 0xda, 0xb5, - 0x99, 0x2d, 0x0f, 0x44, 0x65, 0xdc, 0x03, 0xf1, 0xe6, 0xde, 0x3b, 0xc4, 0x61, 0x3b, 0x84, 0xd9, - 0x15, 0x24, 0x91, 0x21, 0xa6, 0x61, 0x8d, 0x83, 0xba, 0x30, 0x43, 0xc5, 0x72, 0x53, 0x33, 0x23, - 0xac, 0xdd, 0x9a, 0xd0, 0xda, 0x68, 0xf3, 0x2a, 0xf3, 0x12, 0x75, 0x26, 0x7a, 0xa7, 0x58, 0xc1, - 0x58, 0x7f, 0x37, 0x60, 0x4e, 0xdb, 0xbc, 0xed, 0x51, 0x86, 0xfc, 0x13, 0x76, 0xdf, 0x19, 0xd7, - 0x6e, 0xae, 0x4f, 0x58, 0xad, 0x43, 0xb7, 0xa2, 0x24, 0x6c, 0x26, 0x90, 0xf3, 0x18, 0xe9, 0x28, - 0x8b, 0xef, 0x4c, 0x68, 0x31, 0x4d, 0x24, 0x5b, 0xae, 0x16, 0x47, 0xda, 0xad, 0x3f, 0x18, 0x70, - 0x71, 0x3b, 0xb0, 0xdd, 0x8a, 0xdd, 0xb6, 0x7d, 0x87, 0x84, 0x35, 0xbf, 0xf9, 0xcc, 0xf3, 0x2b, - 0x73, 0x90, 0x38, 0x88, 0x51, 0x22, 0x4f, 0xe5, 0x20, 0x5f, 0x9c, 0x61, 0x25, 0x81, 0xf6, 0x95, - 0xa3, 0x4e, 0x09, 0x43, 0x36, 0xc6, 0x34, 0x84, 0xbb, 0x64, 0x14, 0x11, 0x47, 0xb8, 0xe9, 0x4f, - 0x0d, 0x40, 0x49, 0x4b, 0x64, 0xf8, 0xec, 0xc1, 0x8c, 0x17, 0xd9, 0x24, 0x8f, 0xe7, 0xeb, 0x63, - 0x4e, 0x60, 0xc8, 0x2a, 0xc5, 0x0e, 0x24, 0x09, 0x58, 0x61, 0x59, 0xdf, 0x83, 0x02, 0x0f, 0x4a, - 0xb4, 0x6b, 0x3b, 0xe4, 0x3c, 0xce, 0x8c, 0xf0, 0x60, 0x3d, 0x83, 0xff, 0x65, 0x0f, 0xd6, 0x46, - 0x8c, 0xf0, 0xe0, 0x47, 0x19, 0x98, 0xe2, 0xa9, 0xe3, 0x5c, 0x22, 0x93, 0x0d, 0x53, 0xb4, 0x4b, - 0x1c, 0x99, 0x9b, 0xbf, 0x32, 0xae, 0x89, 0x81, 0x4b, 0x1a, 0x5d, 0xe2, 0xc4, 0xe9, 0x8a, 0xbf, - 0x61, 0xa1, 0x1a, 0x79, 0x30, 0x4d, 0x85, 0x2b, 0x8b, 0x64, 0x35, 0xfe, 0x01, 0x12, 0x20, 0xd1, - 0x01, 0xd2, 0xf5, 0x58, 0xf4, 0x8e, 0x25, 0x80, 0xd5, 0x81, 0x22, 0x97, 0x52, 0x39, 0xfc, 0xf3, - 0x30, 0xc5, 0xfa, 0x5d, 0x95, 0x6c, 0x57, 0xd4, 0xdc, 0x76, 0xfb, 0x5d, 0x72, 0x7c, 0xb8, 0x32, - 0x9f, 0x10, 0xe5, 0x24, 0x2c, 0x84, 0xd1, 0xa7, 0x60, 0x46, 0x26, 0x29, 0x19, 0x1b, 0xf4, 0x19, - 0x91, 0xb2, 0x58, 0xf1, 0xad, 
0x5f, 0x73, 0x17, 0x0d, 0x5c, 0x52, 0x0d, 0x7c, 0xd7, 0x63, 0x5e, - 0xe0, 0xa3, 0xf5, 0x14, 0xe2, 0xff, 0x0f, 0x20, 0x2e, 0xa4, 0x84, 0x13, 0x98, 0x5f, 0xd6, 0x4b, - 0x94, 0x49, 0x0d, 0x94, 0xf6, 0xf1, 0xc9, 0xea, 0x61, 0x69, 0x93, 0x79, 0xa9, 0x1a, 0x12, 0x9b, - 0x06, 0xfe, 0x60, 0xa9, 0x8a, 0x05, 0x15, 0x4b, 0xae, 0xf5, 0x17, 0x03, 0x44, 0x81, 0x72, 0x2e, - 0x27, 0xe9, 0xed, 0xf4, 0x49, 0xba, 0x35, 0x81, 0x07, 0x8c, 0x38, 0x44, 0x1f, 0x4a, 0xf3, 0xb8, - 0xdf, 0xf1, 0x2d, 0xec, 0x06, 0x6e, 0xb5, 0xb6, 0x89, 0xe5, 0x46, 0xe8, 0x2d, 0xac, 0x47, 0x64, - 0xac, 0xf8, 0xbc, 0x94, 0x93, 0x8f, 0xd4, 0x9c, 0x59, 0xcd, 0xaa, 0x52, 0x4e, 0xca, 0x51, 0xac, - 0xb9, 0xc8, 0x85, 0x69, 0x5e, 0x2f, 0x32, 0x6a, 0xe6, 0x84, 0x11, 0xaf, 0x8c, 0x69, 0xc4, 0x2e, - 0x57, 0x12, 0x6f, 0x93, 0x78, 0xa5, 0x58, 0xea, 0xb6, 0xfe, 0x6d, 0x00, 0xc4, 0x8e, 0x8e, 0xde, - 0x05, 0x70, 0xd4, 0xc6, 0xab, 0x04, 0xb4, 0x39, 0xc1, 0xea, 0x69, 0x2f, 0x8a, 0xc3, 0x82, 0x26, - 0x51, 0x9c, 0xc0, 0x42, 0x34, 0x59, 0x17, 0xe6, 0x26, 0x6a, 0x2b, 0x13, 0xe7, 0xec, 0xe9, 0x35, - 0xa1, 0xf5, 0x61, 0x06, 0xb2, 0xf5, 0xc0, 0x3d, 0x97, 0x48, 0xf8, 0x76, 0x2a, 0x12, 0xde, 0x1e, - 0x3b, 0xcb, 0xbb, 0x23, 0x03, 0x61, 0x6b, 0x20, 0x10, 0xde, 0x99, 0x00, 0xe3, 0xe9, 0x71, 0xf0, - 0x71, 0x16, 0x66, 0xb9, 0x0b, 0xeb, 0xb8, 0xf4, 0x85, 0x54, 0x5c, 0x5a, 0x1d, 0x88, 0x4b, 0x2f, - 0x24, 0x65, 0xcf, 0x26, 0x2c, 0xf5, 0x61, 0xae, 0x6d, 0x53, 0x56, 0x0f, 0x83, 0x3d, 0xc2, 0xfb, - 0x46, 0x69, 0xf2, 0x64, 0xbd, 0xa7, 0x6e, 0xfb, 0xb7, 0x93, 0xaa, 0x71, 0x1a, 0x09, 0xbd, 0x6f, - 0x00, 0xe2, 0x94, 0xdd, 0xd0, 0xf6, 0x69, 0x64, 0x92, 0x27, 0x1b, 0xb5, 0x49, 0x27, 0xb0, 0x24, - 0x27, 0x80, 0xb6, 0x4f, 0xe8, 0xc7, 0x43, 0x30, 0x4f, 0x1b, 0x9c, 0x79, 0xc0, 0xea, 0x10, 0x4a, - 0xed, 0x26, 0x31, 0xa7, 0xd3, 0x01, 0x6b, 0x27, 0x22, 0x63, 0xc5, 0xb7, 0xae, 0x40, 0xae, 0x1e, - 0xb8, 0xb5, 0xfa, 0xd3, 0x0a, 0x5c, 0xeb, 0xcf, 0x06, 0xf0, 0x50, 0x77, 0x2e, 0xb1, 0xfe, 0x5b, - 0xe9, 0x58, 0x7f, 0x73, 0x7c, 0x27, 0x1f, 0x11, 0xea, 0x7f, 0x93, 0x15, 0xc6, 0x89, 0x48, 0xff, - 0x9e, 0x01, 0x25, 0xcf, 0xf7, 0x98, 0xee, 0xec, 0xa9, 0x79, 0x69, 0xa2, 0x62, 0x4d, 0x2b, 0xaa, - 0xbc, 0x28, 0xc1, 0x4b, 0xb5, 0x94, 0x7e, 0x3c, 0x80, 0x87, 0x98, 0x08, 0xd1, 0x0a, 0x3d, 0x73, - 0x46, 0xe8, 0xc9, 0xf0, 0xac, 0x90, 0x13, 0x38, 0xe8, 0x75, 0x40, 0x94, 0x84, 0x0f, 0x3c, 0x87, - 0x6c, 0x38, 0x4e, 0xd0, 0xf3, 0x99, 0xb8, 0x8c, 0x88, 0xee, 0x3b, 0xb4, 0x97, 0x36, 0x4e, 0x48, - 0xe0, 0x21, 0xa3, 0x78, 0x3b, 0xa4, 0xaf, 0x33, 0x20, 0xdd, 0x0e, 0x9d, 0xbc, 0xd2, 0x40, 0xeb, - 0x50, 0xe4, 0xad, 0xd1, 0x7d, 0xc2, 0x1e, 0x06, 0xe1, 0x81, 0x59, 0x5c, 0x35, 0xae, 0xe6, 0xe3, - 0x2b, 0x96, 0x7b, 0x31, 0x0b, 0x27, 0xe5, 0xac, 0x5f, 0xe5, 0xa0, 0xa0, 0x03, 0x17, 0x5a, 0x83, - 0x5c, 0xb7, 0x65, 0x53, 0x15, 0x90, 0x2e, 0xeb, 0x86, 0x88, 0x13, 0x8f, 0xa3, 0x04, 0x2c, 0x9e, - 0x71, 0x24, 0x87, 0x1e, 0xa6, 0x12, 0x61, 0x66, 0xa2, 0x2b, 0x83, 0x64, 0xb4, 0x7b, 0x66, 0x1e, - 0x3c, 0xe5, 0x55, 0x20, 0xba, 0xc2, 0xbb, 0x44, 0xb7, 0x56, 0x97, 0x07, 0x38, 0xd1, 0xe2, 0xb9, - 0xb5, 0x3a, 0x8e, 0x78, 0xbc, 0x86, 0x10, 0x0f, 0xd4, 0x9c, 0x9d, 0xa8, 0x86, 0x10, 0x4a, 0xe3, - 0xa9, 0x88, 0x57, 0x8a, 0xa5, 0x6e, 0xe4, 0xc9, 0x3b, 0x3f, 0x11, 0xf6, 0x66, 0xce, 0x20, 0xec, - 0xcd, 0xe9, 0xfb, 0x3e, 0x11, 0xe9, 0x62, 0xed, 0xe8, 0x67, 0x06, 0x2c, 0x38, 0xe9, 0xfb, 0x3e, - 0x42, 0xcd, 0xfc, 0x44, 0xd7, 0x48, 0x03, 0xf7, 0x87, 0xda, 0x39, 0x16, 0xaa, 0x83, 0x40, 0xf8, - 0x24, 0x36, 0xba, 0x05, 0xf9, 0x6f, 0x07, 0xb4, 0xda, 0xb6, 0x29, 0x35, 0x0b, 0xa9, 0xba, 0x3f, - 0xff, 0xd5, 0x37, 0x1b, 0x82, 0x7e, 0x7c, 0xb8, 0x52, 
0xac, 0x07, 0xae, 0x7a, 0xc5, 0x7a, 0x80, - 0xf5, 0x63, 0x03, 0x20, 0xee, 0xd3, 0xf5, 0x55, 0x9c, 0x71, 0xaa, 0xab, 0xb8, 0xcc, 0x73, 0x5d, - 0x31, 0xaf, 0x40, 0x8e, 0x84, 0x61, 0x10, 0xca, 0x1b, 0xbc, 0x02, 0xf7, 0x95, 0x2d, 0x4e, 0xc0, - 0x11, 0xdd, 0xfa, 0xe3, 0x14, 0x4c, 0x37, 0x88, 0x13, 0x12, 0x76, 0x2e, 0xe5, 0xd0, 0x67, 0xa0, - 0xe0, 0x75, 0x3a, 0x3d, 0x66, 0xef, 0xb5, 0x89, 0x70, 0xfd, 0x7c, 0xe4, 0x06, 0x35, 0x45, 0xc4, - 0x31, 0x1f, 0x85, 0x30, 0x25, 0x26, 0x17, 0x9d, 0xcb, 0xd7, 0xc6, 0xdc, 0xf8, 0xc8, 0xda, 0xf2, - 0xa6, 0xcd, 0xec, 0x2d, 0x9f, 0x85, 0x7d, 0x9d, 0xef, 0xa7, 0x38, 0xe9, 0x27, 0x7f, 0x5d, 0xc9, - 0x55, 0xfa, 0x8c, 0x50, 0x2c, 0xb0, 0xd0, 0xf7, 0x0d, 0x00, 0xca, 0x42, 0xcf, 0x6f, 0x72, 0xae, - 0xac, 0x8d, 0x77, 0x26, 0x83, 0x6e, 0x68, 0x7d, 0xd1, 0x04, 0xf4, 0x12, 0xc5, 0x0c, 0x9c, 0x00, - 0x45, 0x65, 0x59, 0x56, 0x65, 0x53, 0x71, 0x57, 0x95, 0x55, 0x10, 0x69, 0x8d, 0x0b, 0xaa, 0xa5, - 0x2f, 0x41, 0x41, 0x2b, 0x47, 0x2f, 0x40, 0xf6, 0x80, 0xf4, 0xa3, 0x08, 0x88, 0xf9, 0x23, 0xba, - 0x04, 0xb9, 0x07, 0x76, 0xbb, 0x17, 0x5d, 0x4a, 0xcd, 0xe2, 0xe8, 0xe5, 0x66, 0xe6, 0x86, 0xb1, - 0xf4, 0x2a, 0xcc, 0x0f, 0xcc, 0xed, 0x59, 0xc3, 0x0b, 0x89, 0xe1, 0xd6, 0xc7, 0x06, 0xc8, 0xc9, - 0x9c, 0x4b, 0x49, 0xb0, 0x97, 0x2e, 0x09, 0x5e, 0x9d, 0x68, 0x93, 0x46, 0x54, 0x05, 0x7f, 0xca, - 0xc0, 0x8c, 0xcc, 0x77, 0xe7, 0x72, 0x5e, 0xdc, 0x54, 0xfb, 0x50, 0x19, 0xdb, 0x44, 0x61, 0xc1, - 0xc8, 0x16, 0xa2, 0x3d, 0xd0, 0x42, 0x6c, 0x4e, 0x88, 0xf3, 0xf4, 0x36, 0xe2, 0xc8, 0x80, 0xa2, - 0x94, 0x3c, 0x17, 0xbf, 0x71, 0xd2, 0x7e, 0x73, 0x7b, 0x32, 0x63, 0x47, 0x38, 0xce, 0xef, 0x62, - 0x23, 0x4f, 0xf9, 0x0f, 0xcd, 0xf8, 0x41, 0x5f, 0x25, 0x94, 0xec, 0xc8, 0x84, 0x22, 0x6b, 0x31, - 0xf1, 0x5f, 0x67, 0x2e, 0xfd, 0xf7, 0xe8, 0x7d, 0x49, 0xc7, 0x5a, 0xc2, 0xfa, 0x45, 0x51, 0xcf, - 0x5d, 0x94, 0xc3, 0x4d, 0x75, 0x55, 0x6d, 0x4c, 0xd4, 0xb0, 0x27, 0x96, 0x63, 0xc4, 0xff, 0xce, - 0xdf, 0x85, 0x3c, 0x25, 0x6d, 0xe2, 0xb0, 0x20, 0x94, 0x9b, 0x53, 0x9f, 0xdc, 0xe3, 0xcb, 0x0d, - 0xa9, 0x32, 0x0a, 0xbe, 0xda, 0x70, 0x45, 0xc6, 0x1a, 0x13, 0xad, 0x41, 0xc1, 0x69, 0xf7, 0x28, - 0x23, 0x61, 0xad, 0x2e, 0xa3, 0xaf, 0xbe, 0x59, 0xa8, 0x2a, 0x06, 0x8e, 0x65, 0x50, 0x19, 0x40, - 0xbf, 0x50, 0x13, 0x89, 0x9b, 0x9e, 0x92, 0x28, 0xfb, 0x34, 0x15, 0x27, 0x24, 0xd0, 0x9a, 0x8c, - 0xec, 0xd1, 0xdf, 0x7b, 0xff, 0x37, 0x10, 0xd9, 0xd5, 0xa2, 0x27, 0x7a, 0xe5, 0x6b, 0x50, 0x24, - 0xef, 0x32, 0x12, 0xfa, 0x76, 0x9b, 0x23, 0xe4, 0x04, 0xc2, 0x3c, 0x2f, 0x89, 0xb7, 0x62, 0x32, - 0x4e, 0xca, 0xa0, 0x5d, 0x98, 0xa7, 0x84, 0x52, 0x2f, 0xf0, 0x37, 0xf6, 0xf7, 0x79, 0x57, 0xd1, - 0x17, 0xd5, 0x5a, 0xa1, 0xf2, 0x69, 0x09, 0x37, 0xdf, 0x48, 0xb3, 0x8f, 0x05, 0x29, 0xaa, 0xdf, - 0x25, 0x09, 0x0f, 0xaa, 0x40, 0xb7, 0xa1, 0xd4, 0x4e, 0xde, 0xf4, 0xd7, 0x65, 0x57, 0xa0, 0xfb, - 0x99, 0xd4, 0xff, 0x00, 0x75, 0x3c, 0x20, 0x8d, 0xbe, 0x0e, 0x66, 0x92, 0xd2, 0x08, 0x7a, 0xa1, - 0x43, 0xb0, 0xed, 0x37, 0x49, 0xf4, 0x79, 0x41, 0xa1, 0xf2, 0xd2, 0xd1, 0xe1, 0x8a, 0xb9, 0x3d, - 0x42, 0x06, 0x8f, 0x1c, 0x8d, 0x1e, 0xc2, 0xa2, 0x32, 0x7f, 0x37, 0xb4, 0xf7, 0xf7, 0x3d, 0xa7, - 0x1e, 0xb4, 0x3d, 0xa7, 0x2f, 0x7a, 0x88, 0x42, 0x65, 0x43, 0x4e, 0x70, 0x71, 0x6b, 0x98, 0xd0, - 0xf1, 0xe1, 0xca, 0xaa, 0xb4, 0x7d, 0x28, 0x5f, 0x6c, 0xc5, 0x70, 0xfd, 0x1c, 0xd8, 0xf3, 0x87, - 0x01, 0xbf, 0x98, 0x06, 0xae, 0xf9, 0x4f, 0x07, 0x1e, 0xca, 0x8f, 0x80, 0x87, 0xea, 0x47, 0x3b, - 0x70, 0xb1, 0x45, 0xec, 0x36, 0x6b, 0x55, 0x5b, 0xc4, 0x39, 0x50, 0x07, 0xd8, 0x9c, 0x15, 0x07, - 0x5b, 0x39, 0xd5, 0xc5, 0x7b, 0x27, 0x45, 0xf0, 0xb0, 0x71, 0xe8, 0x97, 0x06, 
0x2c, 0x0e, 0x6c, - 0x77, 0xf4, 0x0d, 0x8c, 0x59, 0x9a, 0xe8, 0x53, 0x83, 0xc6, 0x30, 0x9d, 0x95, 0xcb, 0x7c, 0x49, - 0x86, 0xb2, 0xf0, 0xf0, 0x59, 0xa0, 0x9b, 0x00, 0x5e, 0xf7, 0xae, 0xdd, 0xf1, 0xda, 0x1e, 0xa1, - 0xe6, 0x45, 0xe1, 0x2c, 0x4b, 0xfc, 0x90, 0xd5, 0xea, 0x8a, 0xca, 0x03, 0xa3, 0x7c, 0xeb, 0xe3, - 0x84, 0x34, 0xda, 0x86, 0x92, 0x7c, 0xeb, 0xcb, 0xcd, 0x59, 0x10, 0x9b, 0xf3, 0x09, 0xd1, 0x82, - 0xd7, 0x93, 0x9c, 0xe3, 0x13, 0x14, 0x3c, 0x30, 0x16, 0x55, 0x61, 0x21, 0xe9, 0x86, 0x51, 0x3b, - 0xb0, 0x28, 0x14, 0x2e, 0xf2, 0x56, 0x62, 0x7b, 0x90, 0x89, 0x4f, 0xca, 0x2f, 0xdd, 0x82, 0xb9, - 0x54, 0x44, 0x7a, 0xae, 0x92, 0xeb, 0x83, 0x0c, 0x1f, 0x9d, 0xc8, 0xb2, 0xe8, 0x07, 0x06, 0xcc, - 0x26, 0x41, 0x64, 0x0a, 0xad, 0x9d, 0xc1, 0xdf, 0x79, 0x32, 0x8f, 0xeb, 0x6f, 0x76, 0x92, 0x3c, - 0x9c, 0x02, 0x45, 0xbd, 0x21, 0x8d, 0xf4, 0xc6, 0xb8, 0x59, 0xfc, 0xd4, 0x6d, 0xb4, 0xf5, 0xbe, - 0x01, 0xc3, 0x7d, 0x09, 0x05, 0x90, 0x77, 0xe4, 0x07, 0x5d, 0x72, 0x45, 0xc6, 0xfe, 0x42, 0x24, - 0xf5, 0x5d, 0x58, 0x74, 0x91, 0xaf, 0x68, 0x58, 0x83, 0x58, 0xff, 0x30, 0x20, 0x27, 0x6e, 0xdd, - 0xd1, 0xcb, 0x89, 0xfd, 0xac, 0x14, 0xa5, 0x05, 0xd9, 0x37, 0x48, 0x3f, 0xda, 0xdc, 0x2b, 0xa9, - 0xcd, 0x8d, 0x33, 0xe1, 0x5b, 0x9c, 0x28, 0xf7, 0x1a, 0xad, 0xc3, 0x34, 0xd9, 0xdf, 0x27, 0x0e, - 0x93, 0x69, 0xe8, 0x65, 0x55, 0x4b, 0x6d, 0x09, 0x2a, 0x4f, 0x16, 0x02, 0x2c, 0x7a, 0xc5, 0x52, - 0x98, 0xf7, 0xe8, 0xcc, 0xeb, 0x90, 0x0d, 0xd7, 0x25, 0xee, 0x99, 0x5c, 0x4d, 0x8a, 0xe6, 0x6c, - 0x57, 0xa9, 0xc4, 0xb1, 0x76, 0xde, 0xd4, 0x5e, 0xe6, 0x41, 0xca, 0xdd, 0x0e, 0x1c, 0xbb, 0x1d, - 0x15, 0xaf, 0x98, 0xec, 0x93, 0x90, 0xf8, 0x0e, 0x41, 0x57, 0x21, 0x6f, 0x77, 0xbd, 0xd7, 0xc2, - 0xa0, 0xa7, 0x2e, 0x13, 0xc5, 0xba, 0x6d, 0xd4, 0x6b, 0x82, 0x86, 0x35, 0x97, 0x17, 0x2f, 0x07, - 0x9e, 0xef, 0xca, 0xd5, 0xd0, 0xc5, 0xcb, 0x1b, 0x9e, 0xef, 0x62, 0xc1, 0xd1, 0xa5, 0x53, 0x76, - 0x54, 0xe9, 0x64, 0xdd, 0x86, 0x62, 0xe2, 0x7b, 0x32, 0x9e, 0xc6, 0x3b, 0xfc, 0xa1, 0x6e, 0xb3, - 0xd6, 0x60, 0x1a, 0xdf, 0x51, 0x0c, 0x1c, 0xcb, 0x54, 0xbe, 0xf9, 0xe8, 0xc9, 0xf2, 0x85, 0x8f, - 0x9e, 0x2c, 0x5f, 0x78, 0xfc, 0x64, 0xf9, 0xc2, 0x7b, 0x47, 0xcb, 0xc6, 0xa3, 0xa3, 0x65, 0xe3, - 0xa3, 0xa3, 0x65, 0xe3, 0xf1, 0xd1, 0xb2, 0xf1, 0xf1, 0xd1, 0xb2, 0xf1, 0xf3, 0xbf, 0x2d, 0x5f, - 0xf8, 0xc6, 0xfa, 0x58, 0x5f, 0x60, 0xfe, 0x37, 0x00, 0x00, 0xff, 0xff, 0xea, 0x56, 0xb6, 0xbc, - 0xb9, 0x29, 0x00, 0x00, + 0x15, 0x9e, 0xb6, 0xe3, 0xc4, 0x7e, 0x4e, 0x9c, 0x49, 0xcd, 0x64, 0xd5, 0x13, 0x76, 0x93, 0xd0, + 0x83, 0xd0, 0xf0, 0x67, 0x6b, 0x06, 0x02, 0xc3, 0xcc, 0xee, 0x30, 0xb1, 0x93, 0xd9, 0xf1, 0x6e, + 0x32, 0xdb, 0x94, 0xa3, 0x05, 0xc1, 0x81, 0xed, 0x74, 0x57, 0xec, 0xde, 0xd8, 0xdd, 0xa6, 0xab, + 0x9c, 0x59, 0x4b, 0x08, 0x16, 0x21, 0xd0, 0x02, 0x2b, 0xc1, 0x91, 0x0b, 0x17, 0x6e, 0x9c, 0xe1, + 0x82, 0x38, 0x23, 0x86, 0xdb, 0x72, 0x1b, 0x21, 0x14, 0x76, 0x82, 0xc4, 0x8d, 0x03, 0xe2, 0x44, + 0x90, 0x10, 0xaa, 0xea, 0xea, 0xea, 0x6e, 0xc7, 0x9e, 0xc9, 0xd8, 0x91, 0xa2, 0x3d, 0xb9, 0xfb, + 0xbd, 0x57, 0xef, 0xab, 0x57, 0xf5, 0xea, 0xfd, 0x94, 0x1b, 0x36, 0x9b, 0x2e, 0x6b, 0xf5, 0x76, + 0xcb, 0xb6, 0xdf, 0xa9, 0xd8, 0x6e, 0xdb, 0xed, 0xa9, 0x9f, 0xee, 0x7e, 0xb3, 0xb2, 0x7f, 0x93, + 0x56, 0x68, 0xdb, 0xed, 0x88, 0x07, 0xab, 0xeb, 0x56, 0x6c, 0x3f, 0x20, 0x95, 0x83, 0xeb, 0x95, + 0x26, 0xf1, 0x48, 0x60, 0x31, 0xe2, 0x94, 0xbb, 0x81, 0xcf, 0x7c, 0xb4, 0x16, 0xab, 0x29, 0x87, + 0xe3, 0xa3, 0x9f, 0xee, 0x7e, 0xb3, 0xbc, 0x7f, 0x93, 0x96, 0xb9, 0x1a, 0xf1, 0x60, 0x75, 0xdd, + 0x32, 0x57, 0x53, 0x3e, 0xb8, 0xbe, 0x74, 0xef, 0xb9, 0xd0, 0x69, 0xa5, 0x43, 
0x98, 0x35, 0x04, + 0x7e, 0xe9, 0x73, 0x09, 0x3d, 0x4d, 0xbf, 0xe9, 0x57, 0x04, 0x79, 0xb7, 0xb7, 0x27, 0xde, 0xc4, + 0x8b, 0x78, 0x92, 0xe2, 0x5c, 0x61, 0xd9, 0xf5, 0xb9, 0xce, 0x8e, 0x65, 0xb7, 0x5c, 0x8f, 0x04, + 0x7d, 0x81, 0x18, 0xf4, 0x3c, 0xe6, 0x76, 0xc8, 0x09, 0xfd, 0x5f, 0x7c, 0xd6, 0x00, 0x6a, 0xb7, + 0x48, 0xc7, 0x1a, 0x1c, 0x67, 0x6c, 0x41, 0xa9, 0xd6, 0x76, 0x89, 0xc7, 0xea, 0x66, 0xcd, 0xf7, + 0xf6, 0xdc, 0x26, 0xba, 0x05, 0x25, 0x3e, 0xc0, 0xef, 0xb1, 0x06, 0xb1, 0x7d, 0xcf, 0xa1, 0xba, + 0xb6, 0xaa, 0x5d, 0xcb, 0x55, 0xd1, 0xd1, 0xe1, 0x4a, 0x69, 0x27, 0xc5, 0xc1, 0x03, 0x92, 0xc6, + 0xef, 0x33, 0x50, 0xa8, 0xf9, 0x1e, 0xb3, 0x38, 0x3e, 0x5a, 0x85, 0x29, 0xcf, 0xea, 0x10, 0x31, + 0xbe, 0x50, 0x9d, 0x7d, 0x74, 0xb8, 0x72, 0xe1, 0xe8, 0x70, 0x65, 0xea, 0x81, 0xd5, 0x21, 0x58, + 0x70, 0xd0, 0x55, 0xc8, 0xb9, 0x1d, 0xab, 0x49, 0xf4, 0x8c, 0x10, 0x99, 0x93, 0x22, 0xb9, 0x3a, + 0x27, 0xe2, 0x90, 0x87, 0x5c, 0xc8, 0x75, 0xfd, 0x80, 0x51, 0x7d, 0x7a, 0x35, 0x7b, 0xad, 0x78, + 0x63, 0xa3, 0x3c, 0xd6, 0x4e, 0x96, 0xd5, 0xbc, 0x4c, 0x3f, 0x60, 0x31, 0x14, 0x7f, 0xa3, 0x38, + 0x44, 0x40, 0xdf, 0x81, 0xd9, 0x03, 0xbf, 0xdd, 0xeb, 0x90, 0x6d, 0xbf, 0xe7, 0x31, 0xaa, 0x17, + 0x04, 0x62, 0x75, 0x4c, 0xc4, 0x37, 0x63, 0x55, 0xd5, 0xcb, 0x12, 0x6f, 0x36, 0x41, 0xa4, 0x38, + 0x85, 0x66, 0xfc, 0x4f, 0x83, 0xb9, 0xd4, 0x2c, 0x4f, 0xb1, 0x82, 0x9f, 0x85, 0x7c, 0xcb, 0xa7, + 0x8c, 0x4b, 0x8b, 0x45, 0xcc, 0x55, 0x2f, 0x4a, 0xa9, 0xfc, 0x7d, 0x49, 0xc7, 0x4a, 0x02, 0xdd, + 0x86, 0x39, 0x3b, 0x09, 0xa0, 0x67, 0xc5, 0x90, 0x45, 0x39, 0x24, 0x8d, 0x8e, 0xd3, 0xb2, 0xe8, + 0x26, 0xe4, 0x85, 0xcf, 0xd8, 0x7e, 0x5b, 0x9f, 0x12, 0x13, 0x7a, 0x31, 0x82, 0x32, 0x25, 0xfd, + 0x38, 0xf1, 0x8c, 0x95, 0x34, 0xfa, 0x24, 0x4c, 0xf3, 0x29, 0xd4, 0x4d, 0x3d, 0x27, 0xc6, 0x95, + 0xe4, 0xb8, 0xe9, 0xfb, 0x82, 0x8a, 0x25, 0xd7, 0xf8, 0xa1, 0x06, 0x25, 0x35, 0x85, 0x06, 0xb3, + 0x18, 0x41, 0x14, 0x66, 0x82, 0x9e, 0xe7, 0xb9, 0x5e, 0x53, 0x98, 0x57, 0xbc, 0xb1, 0x35, 0xe9, + 0xf6, 0x0b, 0xbd, 0x38, 0xd4, 0x59, 0x2d, 0x1e, 0x1d, 0xae, 0xcc, 0xc8, 0x17, 0x1c, 0x21, 0x19, + 0x3f, 0xd2, 0x60, 0x71, 0xa8, 0x3c, 0xea, 0x40, 0x81, 0x32, 0x2b, 0x60, 0xc4, 0x59, 0x67, 0x62, + 0x57, 0x8a, 0x37, 0x5e, 0x7e, 0xbe, 0x09, 0xd1, 0x32, 0x0f, 0x11, 0x7c, 0x46, 0xfc, 0x2c, 0x55, + 0x17, 0xe4, 0x52, 0x14, 0x1a, 0x91, 0x5a, 0x1c, 0x23, 0x18, 0xbf, 0xd5, 0x60, 0x3e, 0x35, 0x91, + 0x1e, 0x45, 0x6f, 0x43, 0x8e, 0xf2, 0x29, 0xc9, 0xf5, 0xd8, 0x3c, 0x93, 0xf5, 0x88, 0xcf, 0x43, + 0x68, 0x6e, 0x08, 0x81, 0xd6, 0xa0, 0xa8, 0x7c, 0xa0, 0xbe, 0xa1, 0xe7, 0xc5, 0xee, 0x5d, 0x92, + 0xa2, 0xc5, 0x5a, 0xcc, 0xc2, 0x49, 0x39, 0xe3, 0x6b, 0x30, 0xbf, 0xe9, 0x39, 0x5d, 0xdf, 0xf5, + 0xd8, 0xba, 0xe3, 0x04, 0x84, 0x52, 0xb4, 0x04, 0x19, 0xb7, 0x2b, 0xfd, 0x18, 0xa4, 0x82, 0x4c, + 0xdd, 0xc4, 0x19, 0xb7, 0x8b, 0xae, 0x41, 0xde, 0xf3, 0x1d, 0xc2, 0xbd, 0x5a, 0x3a, 0xd6, 0x2c, + 0x77, 0xaa, 0x07, 0x92, 0x86, 0x15, 0xd7, 0x78, 0x5f, 0x83, 0xd9, 0x48, 0xf3, 0x29, 0x0f, 0xc8, + 0x2a, 0x4c, 0x75, 0xe3, 0xc3, 0xa1, 0x24, 0x84, 0x83, 0x0b, 0x4e, 0xca, 0xaf, 0xb3, 0xcf, 0xe3, + 0xd7, 0xc6, 0xbf, 0x35, 0x28, 0x45, 0xd3, 0x69, 0xf4, 0x76, 0x29, 0x61, 0xe8, 0x21, 0x14, 0xac, + 0xd0, 0x64, 0xc2, 0x03, 0x27, 0x0f, 0x1f, 0xf7, 0xc6, 0xdc, 0xa1, 0x81, 0x25, 0x8c, 0x5d, 0x65, + 0x3d, 0x02, 0xc0, 0x31, 0x16, 0x6a, 0x45, 0x51, 0x32, 0x2b, 0x40, 0x6b, 0x13, 0x82, 0x8e, 0x0e, + 0x92, 0xc6, 0xbf, 0x34, 0x28, 0x44, 0x62, 0x14, 0x05, 0x90, 0xe7, 0x0e, 0xed, 0x58, 0xcc, 0x92, + 0x07, 0xa2, 0x3a, 0xee, 0x81, 0x78, 0x63, 0xf7, 0x6d, 0x62, 0xb3, 0x6d, 0xc2, 0xac, 0x2a, 0x92, + 0xc8, 
0x10, 0xd3, 0xb0, 0xc2, 0x41, 0x5d, 0x98, 0xa1, 0x62, 0xb9, 0xa9, 0x9e, 0x11, 0xd6, 0x6e, + 0x4e, 0x68, 0x6d, 0xb8, 0x79, 0xd5, 0x79, 0x89, 0x3a, 0x13, 0xbe, 0x53, 0x1c, 0xc1, 0x18, 0xff, + 0xd0, 0x60, 0x4e, 0xd9, 0xbc, 0xe5, 0x52, 0x86, 0xbc, 0x13, 0x76, 0xdf, 0x1d, 0xd7, 0x6e, 0xae, + 0x4f, 0x58, 0xad, 0x42, 0x77, 0x44, 0x49, 0xd8, 0x4c, 0x20, 0xe7, 0x32, 0xd2, 0x89, 0x2c, 0xbe, + 0x3b, 0xa1, 0xc5, 0x34, 0x91, 0x6c, 0xb9, 0x5a, 0x1c, 0x6a, 0x37, 0xfe, 0xa8, 0xc1, 0xa5, 0x2d, + 0xdf, 0x72, 0xaa, 0x56, 0xdb, 0xf2, 0x6c, 0x12, 0xd4, 0xbd, 0xe6, 0x33, 0xcf, 0xaf, 0xcc, 0x41, + 0xe2, 0x20, 0x86, 0x89, 0x3c, 0x95, 0x83, 0x3c, 0x71, 0x86, 0x23, 0x09, 0xb4, 0x17, 0x39, 0xea, + 0x94, 0x30, 0x64, 0x7d, 0x4c, 0x43, 0xb8, 0x4b, 0x86, 0x11, 0x71, 0x84, 0x9b, 0xfe, 0x54, 0x03, + 0x94, 0xb4, 0x44, 0x86, 0xcf, 0x1e, 0xcc, 0xb8, 0xa1, 0x4d, 0xf2, 0x78, 0xbe, 0x36, 0xe6, 0x04, + 0x86, 0xac, 0x52, 0xec, 0x40, 0x92, 0x80, 0x23, 0x2c, 0xe3, 0x7b, 0x50, 0xe0, 0x41, 0x89, 0x76, + 0x2d, 0x9b, 0x9c, 0xc7, 0x99, 0x11, 0x1e, 0xac, 0x66, 0xf0, 0x51, 0xf6, 0x60, 0x65, 0xc4, 0x08, + 0x0f, 0x7e, 0x94, 0x81, 0x29, 0x9e, 0x3a, 0xce, 0x25, 0x32, 0x59, 0x30, 0x45, 0xbb, 0xc4, 0x96, + 0xb9, 0xf9, 0x2b, 0xe3, 0x9a, 0xe8, 0x3b, 0xa4, 0xd1, 0x25, 0x76, 0x9c, 0xae, 0xf8, 0x1b, 0x16, + 0xaa, 0x91, 0x0b, 0xd3, 0x54, 0xb8, 0xb2, 0x48, 0x56, 0xe3, 0x1f, 0x20, 0x01, 0x12, 0x1e, 0x20, + 0x55, 0x8f, 0x85, 0xef, 0x58, 0x02, 0x18, 0x1d, 0x28, 0x72, 0xa9, 0x28, 0x87, 0x7f, 0x1e, 0xa6, + 0x58, 0xbf, 0x1b, 0x25, 0xdb, 0x95, 0x68, 0x6e, 0x3b, 0xfd, 0x2e, 0x39, 0x3e, 0x5c, 0x99, 0x4f, + 0x88, 0x72, 0x12, 0x16, 0xc2, 0xe8, 0x53, 0x30, 0x23, 0x93, 0x94, 0x8c, 0x0d, 0xea, 0x8c, 0x48, + 0x59, 0x1c, 0xf1, 0x8d, 0x5f, 0x73, 0x17, 0xf5, 0x1d, 0x52, 0xf3, 0x3d, 0xc7, 0x65, 0xae, 0xef, + 0xa1, 0xb5, 0x14, 0xe2, 0xc7, 0x07, 0x10, 0x17, 0x52, 0xc2, 0x09, 0xcc, 0x2f, 0xab, 0x25, 0xca, + 0xa4, 0x06, 0x4a, 0xfb, 0xf8, 0x64, 0xd5, 0xb0, 0xb4, 0xc9, 0xbc, 0x54, 0x0d, 0x88, 0x45, 0x7d, + 0x6f, 0xb0, 0x54, 0xc5, 0x82, 0x8a, 0x25, 0xd7, 0xf8, 0xab, 0x06, 0xa2, 0x40, 0x39, 0x97, 0x93, + 0xf4, 0x56, 0xfa, 0x24, 0xdd, 0x9e, 0xc0, 0x03, 0x46, 0x1c, 0xa2, 0xff, 0x48, 0xf3, 0xb8, 0xdf, + 0xf1, 0x2d, 0xec, 0xfa, 0x4e, 0xad, 0xbe, 0x81, 0xe5, 0x46, 0xa8, 0x2d, 0x34, 0x43, 0x32, 0x8e, + 0xf8, 0xbc, 0x94, 0x93, 0x8f, 0x54, 0x9f, 0x59, 0xcd, 0x46, 0xa5, 0x9c, 0x94, 0xa3, 0x58, 0x71, + 0xd1, 0x0d, 0x80, 0x6e, 0xe0, 0x1f, 0xb8, 0x8e, 0xa8, 0x2c, 0xc3, 0xba, 0x4b, 0x9d, 0x2d, 0x53, + 0x71, 0x70, 0x42, 0x0a, 0x39, 0x30, 0xcd, 0x6b, 0x4c, 0x46, 0xf5, 0x9c, 0x30, 0xfc, 0xe5, 0x31, + 0x0d, 0xdf, 0xe1, 0x4a, 0xe2, 0xad, 0x15, 0xaf, 0x14, 0x4b, 0xdd, 0xc6, 0x7f, 0x35, 0x80, 0xf8, + 0x70, 0xa0, 0x77, 0x00, 0xec, 0xc8, 0x59, 0xa2, 0xa4, 0xb5, 0x31, 0xc1, 0x8a, 0x2b, 0xcf, 0x8b, + 0xcd, 0x55, 0x24, 0x8a, 0x13, 0x58, 0x88, 0x26, 0x6b, 0xc9, 0xdc, 0x44, 0xad, 0x68, 0xe2, 0x6c, + 0x3e, 0xbd, 0x8e, 0x34, 0xfe, 0x90, 0x81, 0xac, 0xe9, 0x3b, 0xe7, 0x12, 0x3d, 0xdf, 0x4a, 0x45, + 0xcf, 0x3b, 0x63, 0x57, 0x06, 0xce, 0xc8, 0xe0, 0xd9, 0x1a, 0x08, 0x9e, 0x77, 0x27, 0xc0, 0x78, + 0x7a, 0xec, 0x7c, 0x9c, 0x85, 0x59, 0xee, 0xf6, 0x2a, 0x96, 0x7d, 0x21, 0x15, 0xcb, 0x56, 0x07, + 0x62, 0xd9, 0xc5, 0xa4, 0xec, 0xd9, 0x84, 0xb2, 0x3e, 0xcc, 0xb5, 0x2d, 0xca, 0xcc, 0xc0, 0xdf, + 0x25, 0xbc, 0xd7, 0x94, 0x26, 0x4f, 0xd6, 0xaf, 0xaa, 0xab, 0x82, 0xad, 0xa4, 0x6a, 0x9c, 0x46, + 0x42, 0xef, 0x69, 0x80, 0x38, 0x65, 0x27, 0xb0, 0x3c, 0x1a, 0x9a, 0xe4, 0xca, 0xe6, 0x6e, 0xd2, + 0x09, 0x2c, 0xc9, 0x09, 0xa0, 0xad, 0x13, 0xfa, 0xf1, 0x10, 0xcc, 0xd3, 0x06, 0x74, 0x1e, 0xe4, + 0x3a, 0x84, 0x52, 0xab, 0x49, 
0xf4, 0xe9, 0x74, 0x90, 0xdb, 0x0e, 0xc9, 0x38, 0xe2, 0x1b, 0x57, + 0x21, 0x67, 0xfa, 0x4e, 0xdd, 0x7c, 0x5a, 0x51, 0x6c, 0xfc, 0x45, 0x03, 0x1e, 0x1e, 0xcf, 0x25, + 0x3f, 0x7c, 0x2b, 0x9d, 0x1f, 0x6e, 0x8d, 0xef, 0xe4, 0x23, 0xd2, 0xc3, 0x6f, 0xb2, 0xc2, 0x38, + 0x91, 0x1d, 0xde, 0xd5, 0xa0, 0xe4, 0x7a, 0x2e, 0x53, 0xb7, 0x01, 0x54, 0xbf, 0x3c, 0x51, 0x81, + 0xa7, 0x14, 0x55, 0x5f, 0x90, 0xe0, 0xa5, 0x7a, 0x4a, 0x3f, 0x1e, 0xc0, 0x43, 0x4c, 0x84, 0xe8, + 0x08, 0x3d, 0x73, 0x46, 0xe8, 0xc9, 0xf0, 0x1c, 0x21, 0x27, 0x70, 0xd0, 0x6b, 0x80, 0x28, 0x09, + 0x0e, 0x5c, 0x9b, 0xac, 0xdb, 0xb6, 0xdf, 0xf3, 0x98, 0xb8, 0xc0, 0x08, 0xef, 0x48, 0x94, 0x97, + 0x36, 0x4e, 0x48, 0xe0, 0x21, 0xa3, 0x78, 0x0b, 0xa5, 0xae, 0x40, 0x20, 0xdd, 0x42, 0x9d, 0xbc, + 0x06, 0x41, 0x6b, 0x50, 0xe4, 0xed, 0xd4, 0x03, 0xc2, 0x1e, 0xfa, 0xc1, 0xbe, 0x5e, 0x5c, 0xd5, + 0xae, 0xe5, 0xe3, 0x6b, 0x99, 0xfb, 0x31, 0x0b, 0x27, 0xe5, 0x8c, 0x5f, 0xe5, 0xa0, 0xa0, 0x02, + 0x17, 0xaa, 0x40, 0xae, 0xdb, 0xb2, 0x68, 0x14, 0x90, 0xae, 0xa8, 0x26, 0x8a, 0x13, 0x8f, 0xc3, + 0xa4, 0x2d, 0x9e, 0x71, 0x28, 0x87, 0x1e, 0xa6, 0x12, 0x61, 0x66, 0xa2, 0x6b, 0x86, 0x64, 0xb4, + 0x7b, 0x66, 0x1e, 0x3c, 0xe5, 0xf5, 0x21, 0xba, 0xca, 0x3b, 0x4b, 0xa7, 0x6e, 0xca, 0x03, 0x9c, + 0x68, 0x0b, 0x9d, 0xba, 0x89, 0x43, 0x1e, 0xaf, 0x21, 0xc4, 0x03, 0xd5, 0x67, 0x27, 0xaa, 0x21, + 0x84, 0xd2, 0x78, 0x2a, 0xe2, 0x95, 0x62, 0xa9, 0x1b, 0xb9, 0xf2, 0x9e, 0x50, 0x84, 0xbd, 0x99, + 0x33, 0x08, 0x7b, 0x73, 0xea, 0x8e, 0x50, 0x44, 0xba, 0x58, 0x3b, 0xfa, 0x99, 0x06, 0x0b, 0x76, + 0xfa, 0x8e, 0x90, 0x50, 0x3d, 0x3f, 0xd1, 0xd5, 0xd3, 0xc0, 0x9d, 0xa3, 0x72, 0x8e, 0x85, 0xda, + 0x20, 0x10, 0x3e, 0x89, 0x8d, 0x6e, 0x43, 0xfe, 0xdb, 0x3e, 0xad, 0xb5, 0x2d, 0x4a, 0xf5, 0x42, + 0xaa, 0x57, 0xc8, 0x7f, 0xf5, 0x8d, 0x86, 0xa0, 0x1f, 0x1f, 0xae, 0x14, 0x4d, 0xdf, 0x89, 0x5e, + 0xb1, 0x1a, 0x60, 0xfc, 0x58, 0x03, 0x88, 0x7b, 0x7b, 0x75, 0x7d, 0xa7, 0x9d, 0xea, 0xfa, 0x2e, + 0xf3, 0x5c, 0xd7, 0xd2, 0x2b, 0x90, 0x23, 0x41, 0xe0, 0x07, 0xb2, 0xfa, 0x2c, 0x70, 0x5f, 0xd9, + 0xe4, 0x04, 0x1c, 0xd2, 0x8d, 0x3f, 0x4d, 0xc1, 0x74, 0x83, 0xd8, 0x01, 0x61, 0xe7, 0x52, 0x0e, + 0x7d, 0x06, 0x0a, 0x6e, 0xa7, 0xd3, 0x63, 0xd6, 0x6e, 0x9b, 0x08, 0xd7, 0xcf, 0x87, 0x6e, 0x50, + 0x8f, 0x88, 0x38, 0xe6, 0xa3, 0x00, 0xa6, 0xc4, 0xe4, 0xc2, 0x73, 0xf9, 0xea, 0x98, 0x1b, 0x1f, + 0x5a, 0x5b, 0xde, 0xb0, 0x98, 0xb5, 0xe9, 0xb1, 0xa0, 0xaf, 0xf2, 0xfd, 0x14, 0x27, 0xfd, 0xe4, + 0x6f, 0x2b, 0xb9, 0x6a, 0x9f, 0x11, 0x8a, 0x05, 0x16, 0xfa, 0xbe, 0x06, 0x40, 0x59, 0xe0, 0x7a, + 0x4d, 0xce, 0x95, 0xb5, 0xf1, 0xf6, 0x64, 0xd0, 0x0d, 0xa5, 0x2f, 0x9c, 0x80, 0x5a, 0xa2, 0x98, + 0x81, 0x13, 0xa0, 0xa8, 0x2c, 0xcb, 0xaa, 0x6c, 0x2a, 0xee, 0x46, 0x65, 0x15, 0x84, 0x5a, 0xe3, + 0x82, 0x6a, 0xe9, 0x4b, 0x50, 0x50, 0xca, 0xd1, 0x45, 0xc8, 0xee, 0x93, 0x7e, 0x18, 0x01, 0x31, + 0x7f, 0x44, 0x97, 0x21, 0x77, 0x60, 0xb5, 0x7b, 0xe1, 0x45, 0xd6, 0x2c, 0x0e, 0x5f, 0x6e, 0x65, + 0x6e, 0x6a, 0x4b, 0xaf, 0xc0, 0xfc, 0xc0, 0xdc, 0x9e, 0x35, 0xbc, 0x90, 0x18, 0x6e, 0x7c, 0xa8, + 0x81, 0x9c, 0xcc, 0xb9, 0x94, 0x04, 0xbb, 0xe9, 0x92, 0xe0, 0x95, 0x89, 0x36, 0x69, 0x44, 0x55, + 0xf0, 0xe7, 0x0c, 0xcc, 0xc8, 0x7c, 0x77, 0x2e, 0xe7, 0xc5, 0x49, 0xb5, 0x0f, 0xd5, 0xb1, 0x4d, + 0x14, 0x16, 0x8c, 0x6c, 0x21, 0xda, 0x03, 0x2d, 0xc4, 0xc6, 0x84, 0x38, 0x4f, 0x6f, 0x23, 0x8e, + 0x34, 0x28, 0x4a, 0xc9, 0x73, 0xf1, 0x1b, 0x3b, 0xed, 0x37, 0x77, 0x26, 0x33, 0x76, 0x84, 0xe3, + 0xfc, 0x2e, 0x36, 0xf2, 0x94, 0xff, 0xea, 0x8c, 0x1f, 0xf4, 0xa3, 0x84, 0x92, 0x1d, 0x99, 0x50, + 0x64, 0x2d, 0x26, 0xfe, 0x1f, 0xcd, 0xa5, 0xff, 0x52, 
0x7d, 0x20, 0xe9, 0x58, 0x49, 0x18, 0xbf, + 0x28, 0xaa, 0xb9, 0x8b, 0x72, 0xb8, 0x19, 0x5d, 0x6f, 0x6b, 0x13, 0x35, 0xec, 0x89, 0xe5, 0x18, + 0xf1, 0x5f, 0xf5, 0x77, 0x21, 0x4f, 0x49, 0x9b, 0xd8, 0xcc, 0x0f, 0xe4, 0xe6, 0x98, 0x93, 0x7b, + 0x7c, 0xb9, 0x21, 0x55, 0x86, 0xc1, 0x57, 0x19, 0x1e, 0x91, 0xb1, 0xc2, 0x44, 0x15, 0x28, 0xd8, + 0xed, 0x1e, 0x65, 0x24, 0xa8, 0x9b, 0x32, 0xfa, 0xaa, 0x9b, 0x85, 0x5a, 0xc4, 0xc0, 0xb1, 0x0c, + 0x2a, 0x03, 0xa8, 0x17, 0xaa, 0x23, 0x71, 0x3b, 0x54, 0x12, 0x65, 0x9f, 0xa2, 0xe2, 0x84, 0x04, + 0xaa, 0xc8, 0xc8, 0x1e, 0xfe, 0x25, 0xf8, 0xb1, 0x81, 0xc8, 0x1e, 0x2d, 0x7a, 0xa2, 0x57, 0xbe, + 0x0e, 0x45, 0xf2, 0x0e, 0x23, 0x81, 0x67, 0xb5, 0x39, 0x42, 0x4e, 0x20, 0xcc, 0xf3, 0x92, 0x78, + 0x33, 0x26, 0xe3, 0xa4, 0x0c, 0xda, 0x81, 0x79, 0x4a, 0x28, 0x75, 0x7d, 0x6f, 0x7d, 0x6f, 0x8f, + 0x77, 0x15, 0x7d, 0x51, 0xad, 0x15, 0xaa, 0x9f, 0x96, 0x70, 0xf3, 0x8d, 0x34, 0xfb, 0x58, 0x90, + 0xc2, 0xfa, 0x5d, 0x92, 0xf0, 0xa0, 0x0a, 0x74, 0x07, 0x4a, 0xed, 0xe4, 0xbf, 0x03, 0xa6, 0xec, + 0x0a, 0x54, 0x3f, 0x93, 0xfa, 0xef, 0xc0, 0xc4, 0x03, 0xd2, 0xe8, 0xeb, 0xa0, 0x27, 0x29, 0x0d, + 0xbf, 0x17, 0xd8, 0x04, 0x5b, 0x5e, 0x93, 0x84, 0x9f, 0x24, 0x14, 0xaa, 0x2f, 0x1e, 0x1d, 0xae, + 0xe8, 0x5b, 0x23, 0x64, 0xf0, 0xc8, 0xd1, 0xe8, 0x21, 0x2c, 0x46, 0xe6, 0xef, 0x04, 0xd6, 0xde, + 0x9e, 0x6b, 0x9b, 0x7e, 0xdb, 0xb5, 0xfb, 0xa2, 0x87, 0x28, 0x54, 0xd7, 0xe5, 0x04, 0x17, 0x37, + 0x87, 0x09, 0x1d, 0x1f, 0xae, 0xac, 0x4a, 0xdb, 0x87, 0xf2, 0xc5, 0x56, 0x0c, 0xd7, 0xcf, 0x81, + 0x5d, 0x6f, 0x18, 0xf0, 0x0b, 0x69, 0xe0, 0xba, 0xf7, 0x74, 0xe0, 0xa1, 0xfc, 0x10, 0x78, 0xa8, + 0x7e, 0xb4, 0x0d, 0x97, 0x5a, 0xc4, 0x6a, 0xb3, 0x56, 0xad, 0x45, 0xec, 0xfd, 0xe8, 0x00, 0xeb, + 0xb3, 0xe2, 0x60, 0x47, 0x4e, 0x75, 0xe9, 0xfe, 0x49, 0x11, 0x3c, 0x6c, 0x1c, 0xfa, 0xa5, 0x06, + 0x8b, 0x03, 0xdb, 0x1d, 0x7e, 0x37, 0xa3, 0x97, 0x26, 0xfa, 0x3c, 0xa1, 0x31, 0x4c, 0x67, 0xf5, + 0x0a, 0x5f, 0x92, 0xa1, 0x2c, 0x3c, 0x7c, 0x16, 0xe8, 0x16, 0x80, 0xdb, 0xbd, 0x67, 0x75, 0xdc, + 0xb6, 0x4b, 0xa8, 0x7e, 0x49, 0x38, 0xcb, 0x12, 0x3f, 0x64, 0x75, 0x33, 0xa2, 0xf2, 0xc0, 0x28, + 0xdf, 0xfa, 0x38, 0x21, 0x8d, 0xb6, 0xa0, 0x24, 0xdf, 0xfa, 0x72, 0x73, 0x16, 0xc4, 0xe6, 0x7c, + 0x42, 0xb4, 0xe0, 0x66, 0x92, 0x73, 0x7c, 0x82, 0x82, 0x07, 0xc6, 0xa2, 0x1a, 0x2c, 0x24, 0xdd, + 0x30, 0x6c, 0x07, 0x16, 0x85, 0xc2, 0x45, 0xde, 0x4a, 0x6c, 0x0d, 0x32, 0xf1, 0x49, 0xf9, 0xa5, + 0xdb, 0x30, 0x97, 0x8a, 0x48, 0xcf, 0x55, 0x72, 0xbd, 0x9f, 0xe1, 0xa3, 0x13, 0x59, 0x16, 0xfd, + 0x40, 0x83, 0xd9, 0x24, 0x88, 0x4c, 0xa1, 0xf5, 0x33, 0xf8, 0x0b, 0x50, 0xe6, 0x71, 0xf5, 0x9d, + 0x4f, 0x92, 0x87, 0x53, 0xa0, 0xa8, 0x37, 0xa4, 0x91, 0x5e, 0x1f, 0x37, 0x8b, 0x9f, 0xba, 0x8d, + 0x36, 0xde, 0xd3, 0x60, 0xb8, 0x2f, 0x21, 0x1f, 0xf2, 0xb6, 0xfc, 0x08, 0x4c, 0xae, 0xc8, 0xd8, + 0x5f, 0x95, 0xa4, 0xbe, 0x25, 0x0b, 0x2f, 0xff, 0x23, 0x1a, 0x56, 0x20, 0xc6, 0x3f, 0x35, 0xc8, + 0x89, 0x5b, 0x77, 0xf4, 0x52, 0x62, 0x3f, 0xab, 0x45, 0x69, 0x41, 0xf6, 0x75, 0xd2, 0x0f, 0x37, + 0xf7, 0x6a, 0x6a, 0x73, 0xe3, 0x4c, 0xf8, 0x26, 0x27, 0xca, 0xbd, 0x46, 0x6b, 0x30, 0x4d, 0xf6, + 0xf6, 0x88, 0xcd, 0x64, 0x1a, 0x7a, 0x29, 0xaa, 0xa5, 0x36, 0x05, 0x95, 0x27, 0x0b, 0x01, 0x16, + 0xbe, 0x62, 0x29, 0xcc, 0x7b, 0x74, 0xe6, 0x76, 0xc8, 0xba, 0xe3, 0x10, 0xe7, 0x4c, 0xae, 0x26, + 0x45, 0x73, 0xb6, 0x13, 0xa9, 0xc4, 0xb1, 0x76, 0xde, 0xd4, 0x5e, 0xe1, 0x41, 0xca, 0xd9, 0xf2, + 0x6d, 0xab, 0x1d, 0x16, 0xaf, 0x98, 0xec, 0x91, 0x80, 0x78, 0x36, 0x41, 0xd7, 0x20, 0x6f, 0x75, + 0xdd, 0x57, 0x03, 0xbf, 0x17, 0x5d, 0x26, 0x8a, 0x75, 0x5b, 0x37, 0xeb, 0x82, 
0x86, 0x15, 0x97, + 0x17, 0x2f, 0xfb, 0xae, 0xe7, 0xc8, 0xd5, 0x50, 0xc5, 0xcb, 0xeb, 0xae, 0xe7, 0x60, 0xc1, 0x51, + 0xa5, 0x53, 0x76, 0x54, 0xe9, 0x64, 0xdc, 0x81, 0x62, 0xe2, 0x1b, 0x34, 0x9e, 0xc6, 0x3b, 0xfc, + 0xc1, 0xb4, 0x58, 0x6b, 0x30, 0x8d, 0x6f, 0x47, 0x0c, 0x1c, 0xcb, 0x54, 0xbf, 0xf9, 0xe8, 0xc9, + 0xf2, 0x85, 0x0f, 0x9e, 0x2c, 0x5f, 0x78, 0xfc, 0x64, 0xf9, 0xc2, 0xbb, 0x47, 0xcb, 0xda, 0xa3, + 0xa3, 0x65, 0xed, 0x83, 0xa3, 0x65, 0xed, 0xf1, 0xd1, 0xb2, 0xf6, 0xe1, 0xd1, 0xb2, 0xf6, 0xf3, + 0xbf, 0x2f, 0x5f, 0xf8, 0xc6, 0xda, 0x58, 0x5f, 0x6d, 0xfe, 0x3f, 0x00, 0x00, 0xff, 0xff, 0xaa, + 0x33, 0x1f, 0xcc, 0xed, 0x29, 0x00, 0x00, } func (m *ClientIPConfig) Marshal() (dAtA []byte, err error) { @@ -2175,6 +2176,11 @@ func (m *NodeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x2a } } + i -= len(m.ProviderID) + copy(dAtA[i:], m.ProviderID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProviderID))) + i-- + dAtA[i] = 0x1a i -= len(m.PodCIDR) copy(dAtA[i:], m.PodCIDR) i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodCIDR))) @@ -3597,6 +3603,8 @@ func (m *NodeSpec) Size() (n int) { _ = l l = len(m.PodCIDR) n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ProviderID) + n += 1 + l + sovGenerated(uint64(l)) if len(m.Taints) > 0 { for _, e := range m.Taints { l = e.Size() @@ -4307,6 +4315,7 @@ func (this *NodeSpec) String() string { repeatedStringForTaints += "}" s := strings.Join([]string{`&NodeSpec{`, `PodCIDR:` + fmt.Sprintf("%v", this.PodCIDR) + `,`, + `ProviderID:` + fmt.Sprintf("%v", this.ProviderID) + `,`, `Taints:` + repeatedStringForTaints + `,`, `PodCIDRs:` + fmt.Sprintf("%v", this.PodCIDRs) + `,`, `}`, @@ -6994,6 +7003,38 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { } m.PodCIDR = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProviderID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProviderID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Taints", wireType) diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/generated.proto b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/generated.proto index 6317031d5..3b9f1c67d 100644 --- a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/generated.proto +++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/generated.proto @@ -335,6 +335,10 @@ message NodeSpec { // +patchStrategy=merge repeated string podCIDRs = 7; + // ID of the node assigned by the cloud provider in the format: :// + // +optional + optional string providerID = 3; + // If specified, the node's taints. 
 // +optional
 repeated Taint taints = 5;
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/types.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/types.go
index 09ce067b0..42ec7a306 100644
--- a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/types.go
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/types.go
@@ -965,6 +965,10 @@ type NodeSpec struct {
 	// +patchStrategy=merge
 	PodCIDRs []string `json:"podCIDRs,omitempty" protobuf:"bytes,7,opt,name=podCIDRs" patchStrategy:"merge"`

+	// ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID>
+	// +optional
+	ProviderID string `json:"providerID,omitempty" protobuf:"bytes,3,opt,name=providerID"`
+
 	// If specified, the node's taints.
 	// +optional
 	Taints []Taint `json:"taints,omitempty" protobuf:"bytes,5,opt,name=taints"`
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/zz_generated.deepequal.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/zz_generated.deepequal.go
index 16ec2d5af..d17fb78c5 100644
--- a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/zz_generated.deepequal.go
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/api/core/v1/zz_generated.deepequal.go
@@ -565,6 +565,9 @@ func (in *NodeSpec) DeepEqual(other *NodeSpec) bool {
 		}
 	}

+	if in.ProviderID != other.ProviderID {
+		return false
+	}
 	if ((in.Taints != nil) && (other.Taints != nil)) || ((in.Taints == nil) != (other.Taints == nil)) {
 		in, other := &in.Taints, &other.Taints
 		if other == nil {
diff --git a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/meta.go b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/meta.go
index 354f66409..f5b506010 100644
--- a/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/meta.go
+++ b/vendor/github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1/meta.go
@@ -13,8 +13,8 @@ import (

 func (meta *ListMeta) GetResourceVersion() string { return meta.ResourceVersion }
 func (meta *ListMeta) SetResourceVersion(version string) { meta.ResourceVersion = version }
-func (meta *ListMeta) GetSelfLink() string { panic("not implemented") }
-func (meta *ListMeta) SetSelfLink(_ string) { panic("not implemented") }
+func (meta *ListMeta) GetSelfLink() string { panic("ListMeta - GetSelfLink() not implemented") }
+func (meta *ListMeta) SetSelfLink(_ string) { panic("ListMeta - SetSelfLink() not implemented") }
 func (meta *ListMeta) GetContinue() string { return meta.Continue }
 func (meta *ListMeta) SetContinue(c string) { meta.Continue = c }
 func (meta *ListMeta) GetRemainingItemCount() *int64 { return meta.RemainingItemCount }
@@ -38,23 +38,29 @@ func (obj *ObjectMeta) GetObjectMeta() metav1.Object { return obj }

 // Namespace implements metav1.Object for any object with an ObjectMeta typed field. Allows
 // fast, direct access to metadata fields for API objects.
-func (meta *ObjectMeta) GetNamespace() string { return meta.Namespace } -func (meta *ObjectMeta) SetNamespace(namespace string) { meta.Namespace = namespace } -func (meta *ObjectMeta) GetName() string { return meta.Name } -func (meta *ObjectMeta) SetName(name string) { meta.Name = name } -func (meta *ObjectMeta) GetGenerateName() string { panic("not implemented") } -func (meta *ObjectMeta) SetGenerateName(string) { panic("not implemented") } -func (meta *ObjectMeta) GetUID() types.UID { return meta.UID } -func (meta *ObjectMeta) SetUID(uid types.UID) { meta.UID = uid } -func (meta *ObjectMeta) GetResourceVersion() string { return meta.ResourceVersion } -func (meta *ObjectMeta) SetResourceVersion(ver string) { meta.ResourceVersion = ver } -func (meta *ObjectMeta) GetGeneration() int64 { panic("not implemented") } -func (meta *ObjectMeta) SetGeneration(_ int64) { panic("not implemented") } -func (meta *ObjectMeta) GetSelfLink() string { panic("not implemented") } -func (meta *ObjectMeta) SetSelfLink(_ string) { panic("not implemented") } -func (meta *ObjectMeta) GetCreationTimestamp() metav1.Time { panic("not implemented") } +func (meta *ObjectMeta) GetNamespace() string { return meta.Namespace } +func (meta *ObjectMeta) SetNamespace(namespace string) { meta.Namespace = namespace } +func (meta *ObjectMeta) GetName() string { return meta.Name } +func (meta *ObjectMeta) SetName(name string) { meta.Name = name } +func (meta *ObjectMeta) GetGenerateName() string { + panic("ObjectMeta - GetGenerateName() not implemented") +} +func (meta *ObjectMeta) SetGenerateName(string) { + panic("ObjectMeta - SetGenerateName() not implemented") +} +func (meta *ObjectMeta) GetUID() types.UID { return meta.UID } +func (meta *ObjectMeta) SetUID(uid types.UID) { meta.UID = uid } +func (meta *ObjectMeta) GetResourceVersion() string { return meta.ResourceVersion } +func (meta *ObjectMeta) SetResourceVersion(ver string) { meta.ResourceVersion = ver } +func (meta *ObjectMeta) GetGeneration() int64 { panic("ObjectMeta - GetGeneration() not implemented") } +func (meta *ObjectMeta) SetGeneration(_ int64) { panic("ObjectMeta - SetGeneration() not implemented") } +func (meta *ObjectMeta) GetSelfLink() string { panic("ObjectMeta - GetSelfLink() not implemented") } +func (meta *ObjectMeta) SetSelfLink(_ string) { panic("ObjectMeta - SetSelfLink() not implemented") } +func (meta *ObjectMeta) GetCreationTimestamp() metav1.Time { + panic("ObjectMeta - GetCreationTimestamp() not implemented") +} func (meta *ObjectMeta) SetCreationTimestamp(_ metav1.Time) { - panic("not implemented") + panic("ObjectMeta - SetCreationTimestamp() not implemented") } func (meta *ObjectMeta) GetDeletionTimestamp() *metav1.Time { if meta.DeletionTimestamp == nil { @@ -65,27 +71,36 @@ func (meta *ObjectMeta) GetDeletionTimestamp() *metav1.Time { } } func (meta *ObjectMeta) SetDeletionTimestamp(_ *metav1.Time) { - panic("not implemented") + panic("ObjectMeta - SetDeletionTimestamp() not implemented") } func (meta *ObjectMeta) GetDeletionGracePeriodSeconds() *int64 { - panic("not implemented") + panic("ObjectMeta - GetDeletionGracePeriodSeconds() not implemented") } func (meta *ObjectMeta) SetDeletionGracePeriodSeconds(_ *int64) { - panic("not implemented") + panic("ObjectMeta - SetDeletionGracePeriodSeconds() not implemented") } func (meta *ObjectMeta) GetLabels() map[string]string { return meta.Labels } func (meta *ObjectMeta) SetLabels(labels map[string]string) { meta.Labels = labels } func (meta *ObjectMeta) GetAnnotations() map[string]string { 
return meta.Annotations } func (meta *ObjectMeta) SetAnnotations(annotations map[string]string) { meta.Annotations = annotations } -func (meta *ObjectMeta) GetFinalizers() []string { panic("not implemented") } -func (meta *ObjectMeta) SetFinalizers(_ []string) { panic("not implemented") } +func (meta *ObjectMeta) GetFinalizers() []string { + panic("ObjectMeta - GetFinalizers() not implemented") +} +func (meta *ObjectMeta) SetFinalizers(_ []string) { + panic("ObjectMeta - SetFinalizers() not implemented") +} func (meta *ObjectMeta) GetOwnerReferences() []metav1.OwnerReference { return FullOwnerReferences(meta.OwnerReferences) } func (meta *ObjectMeta) SetOwnerReferences(references []metav1.OwnerReference) { meta.OwnerReferences = SlimOwnerReferences(references) } -func (meta *ObjectMeta) GetZZZ_DeprecatedClusterName() string { panic("not implemented") } -func (meta *ObjectMeta) SetZZZ_DeprecatedClusterName(_ string) { panic("not implemented") } -func (meta *ObjectMeta) GetManagedFields() []metav1.ManagedFieldsEntry { panic("not implemented") } -func (meta *ObjectMeta) SetManagedFields(_ []metav1.ManagedFieldsEntry) { panic("not implemented") } +func (meta *ObjectMeta) GetZZZ_DeprecatedClusterName() string { panic("not implemented") } +func (meta *ObjectMeta) SetZZZ_DeprecatedClusterName(_ string) { panic("not implemented") } + +func (meta *ObjectMeta) GetManagedFields() []metav1.ManagedFieldsEntry { + panic("ObjectMeta - GetManagedFields() not implemented") +} +func (meta *ObjectMeta) SetManagedFields(_ []metav1.ManagedFieldsEntry) { + panic("ObjectMeta - SetManagedFields() not implemented") +} diff --git a/vendor/github.com/cilium/cilium/pkg/logging/logfields/logfields.go b/vendor/github.com/cilium/cilium/pkg/logging/logfields/logfields.go index a5f3042da..c4796bf95 100644 --- a/vendor/github.com/cilium/cilium/pkg/logging/logfields/logfields.go +++ b/vendor/github.com/cilium/cilium/pkg/logging/logfields/logfields.go @@ -47,6 +47,9 @@ const ( // Labels are any label, they may not be relevant to the security identity. Labels = "labels" + // Source is the label or node information source + Source = "source" + // Controller is the name of the controller to log it. Controller = "controller" @@ -232,6 +235,9 @@ const ( // Hash is a hash of something Hash = "hash" + // ServerNames is the list of TLS SNIs + ServerNames = "serverNames" + // ServiceName is the orchestration framework name for a service ServiceName = "serviceName" @@ -251,6 +257,9 @@ const ( // ClusterName is the name of the cluster ClusterName = "clusterName" + // ClusterID is the ID of the cluster + ClusterID = "clusterID" + // AddrCluster is a pair of IP address and ClusterID AddrCluster = "addrCluster" @@ -359,6 +368,16 @@ const ( // Tunnel is the tunnel name Tunnel = "tunnel" + // TunnelPeer is the tunnel peer address + TunnelPeer = "tunnelPeer" + + // ConflictingTunnelPeer is the address of a tunnel peer which conflicts + // with TunnelPeer + ConflictingTunnelPeer = "conflictingTunnelPeer" + + // Type is the address type + Type = "type" + // Selector is a selector of any sort: endpoint, CIDR, toFQDNs Selector = "Selector" @@ -539,6 +558,10 @@ const ( // Key is the identity of the encryption key Key = "key" + // ConflictingKey is the identity of the encryption key which conflicts with + // Key + ConflictingKey = "conflictingKey" + // URL represents a Uniform Resource Locator. 
URL = "url" @@ -630,6 +653,10 @@ const ( // SourceIP is a source IP SourceIP = "sourceIP" + DestinationIP = "destinationIP" + + SourceCIDR = "sourceCIDR" + // DestinationCIDR is a destination CIDR DestinationCIDR = "destinationCIDR" @@ -645,6 +672,13 @@ const ( // Number of Backends failed while restoration. FailedBackends = "failedBackends" + // SkippedBackends is the number of Backends that were skipped during restore + // as duplicates. + SkippedBackends = "skippedBackends" + + // OrphanBackends is the number Backends that are not associated with any services. + OrphanBackends = "orphanBackends" + // Number of Services failed while restoration. RestoredSVCs = "restoredServices" @@ -677,4 +711,19 @@ const ( // User identifies a given user User = "user" + + // CIDRGroupRef is a references to a CiliumCIDRGroup object. + CIDRGroupRef = "cidrGroupRef" + + // Workers represents the number of workers. + Workers = "workers" + + // Event identifies the type of an event. + Event = "event" + + // Prefix identifies a given prefix. + Prefix = "prefix" + + // Value identifies a generic value (e.g., of a key/value pair). + Value = "value" ) diff --git a/vendor/github.com/cilium/cilium/pkg/logging/logging.go b/vendor/github.com/cilium/cilium/pkg/logging/logging.go index 1d96a87b2..ccb7fb135 100644 --- a/vendor/github.com/cilium/cilium/pkg/logging/logging.go +++ b/vendor/github.com/cilium/cilium/pkg/logging/logging.go @@ -12,6 +12,7 @@ import ( "regexp" "strings" "sync/atomic" + "time" "github.com/sirupsen/logrus" "k8s.io/klog/v2" @@ -26,8 +27,9 @@ const ( LevelOpt = "level" FormatOpt = "format" - LogFormatText LogFormat = "text" - LogFormatJSON LogFormat = "json" + LogFormatText LogFormat = "text" + LogFormatJSON LogFormat = "json" + LogFormatJSONTimestamp LogFormat = "json-ts" // DefaultLogFormat is the string representation of the default logrus.Formatter // we want to use (possible values: text or json) @@ -105,10 +107,10 @@ func (o LogOptions) GetLogFormat() LogFormat { } formatOpt = strings.ToLower(formatOpt) - re := regexp.MustCompile(`^(text|json)$`) + re := regexp.MustCompile(`^(text|json|json-ts)$`) if !re.MatchString(formatOpt) { logrus.WithError( - fmt.Errorf("incorrect log format configured '%s', expected 'text' or 'json'", formatOpt), + fmt.Errorf("incorrect log format configured '%s', expected 'text', 'json' or 'json-ts'", formatOpt), ).Warning("Ignoring user-configured log format") return DefaultLogFormat } @@ -203,6 +205,11 @@ func GetFormatter(format LogFormat) logrus.Formatter { return &logrus.JSONFormatter{ DisableTimestamp: true, } + case LogFormatJSONTimestamp: + return &logrus.JSONFormatter{ + DisableTimestamp: false, + TimestampFormat: time.RFC3339Nano, + } } return nil diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/bpf.go b/vendor/github.com/cilium/cilium/pkg/metrics/bpf.go deleted file mode 100644 index ec5816542..000000000 --- a/vendor/github.com/cilium/cilium/pkg/metrics/bpf.go +++ /dev/null @@ -1,89 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package metrics - -import ( - "context" - "encoding/json" - "fmt" - "os/exec" - "time" - - "github.com/prometheus/client_golang/prometheus" - "github.com/sirupsen/logrus" -) - -type bpfCollector struct { - bpfMapsMemory *prometheus.Desc - bpfProgMemory *prometheus.Desc -} - -func newbpfCollector() *bpfCollector { - return &bpfCollector{ - bpfMapsMemory: prometheus.NewDesc( - prometheus.BuildFQName(Namespace, "", "bpf_maps_virtual_memory_max_bytes"), - "BPF maps kernel max memory usage 
size in bytes.", - nil, nil, - ), - bpfProgMemory: prometheus.NewDesc( - prometheus.BuildFQName(Namespace, "", "bpf_progs_virtual_memory_max_bytes"), - "BPF programs kernel max memory usage size in bytes.", - nil, nil, - ), - } -} - -func (s *bpfCollector) Describe(ch chan<- *prometheus.Desc) { - ch <- s.bpfMapsMemory - ch <- s.bpfProgMemory -} - -type memoryEntry struct { - BytesMemlock uint64 `json:"bytes_memlock"` -} - -func getMemoryUsage(typ string) (uint64, error) { - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - cmd := exec.CommandContext(ctx, "bpftool", "-j", typ, "show") - out, err := cmd.Output() - if err != nil { - return 0, fmt.Errorf("unable to get bpftool output: %w", err) - } - - var memoryEntries []memoryEntry - err = json.Unmarshal(out, &memoryEntries) - if err != nil { - return 0, fmt.Errorf("unable to unmarshal bpftool output: %w", err) - } - var totalMem uint64 - for _, entry := range memoryEntries { - totalMem += entry.BytesMemlock - } - return totalMem, nil -} - -func (s *bpfCollector) Collect(ch chan<- prometheus.Metric) { - mapMem, err := getMemoryUsage("map") - if err != nil { - logrus.WithError(err).Error("Error while getting BPF maps memory usage") - } else { - ch <- prometheus.MustNewConstMetric( - s.bpfMapsMemory, - prometheus.GaugeValue, - float64(mapMem), - ) - } - - progMem, err := getMemoryUsage("prog") - if err != nil { - logrus.WithError(err).Error("Error while getting BPF progs memory usage") - } else { - ch <- prometheus.MustNewConstMetric( - s.bpfProgMemory, - prometheus.GaugeValue, - float64(progMem), - ) - } -} diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/interfaces.go b/vendor/github.com/cilium/cilium/pkg/metrics/interfaces.go deleted file mode 100644 index 56b3be32b..000000000 --- a/vendor/github.com/cilium/cilium/pkg/metrics/interfaces.go +++ /dev/null @@ -1,135 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package metrics - -import ( - "github.com/cilium/cilium/api/v1/client/daemon" - "github.com/cilium/cilium/api/v1/health/client/connectivity" - - "github.com/prometheus/client_golang/prometheus" - dto "github.com/prometheus/client_model/go" -) - -type daemonHealthGetter interface { - GetHealthz(params *daemon.GetHealthzParams, opts ...daemon.ClientOption) (*daemon.GetHealthzOK, error) -} - -type connectivityStatusGetter interface { - GetStatus(params *connectivity.GetStatusParams, opts ...connectivity.ClientOption) (*connectivity.GetStatusOK, error) -} - -type CounterVec interface { - WithLabelValues(lvls ...string) prometheus.Counter - GetMetricWithLabelValues(lvs ...string) (prometheus.Counter, error) - With(labels prometheus.Labels) prometheus.Counter - prometheus.Collector -} - -type GaugeVec interface { - WithLabelValues(lvls ...string) prometheus.Gauge - prometheus.Collector -} - -var ( - NoOpMetric prometheus.Metric = &metric{} - NoOpCollector prometheus.Collector = &collector{} - - NoOpCounter prometheus.Counter = &counter{NoOpMetric, NoOpCollector} - NoOpCounterVec CounterVec = &counterVec{NoOpCollector} - NoOpObserver prometheus.Observer = &observer{} - NoOpObserverVec prometheus.ObserverVec = &observerVec{NoOpCollector} - NoOpGauge prometheus.Gauge = &gauge{NoOpMetric, NoOpCollector} - NoOpGaugeVec GaugeVec = &gaugeVec{NoOpCollector} -) - -// Metric - -type metric struct{} - -// *WARNING*: Desc returns nil so do not register this metric into prometheus -// default register. 
-func (m *metric) Desc() *prometheus.Desc { return nil } -func (m *metric) Write(*dto.Metric) error { return nil } - -// Collector - -type collector struct{} - -func (c *collector) Describe(chan<- *prometheus.Desc) {} -func (c *collector) Collect(chan<- prometheus.Metric) {} - -// Counter - -type counter struct { - prometheus.Metric - prometheus.Collector -} - -func (cv *counter) Add(float64) {} -func (cv *counter) Inc() {} - -// CounterVec - -type counterVec struct{ prometheus.Collector } - -func (cv *counterVec) WithLabelValues(lvls ...string) prometheus.Counter { return NoOpCounter } - -func (cv *counterVec) GetMetricWithLabelValues(lvs ...string) (prometheus.Counter, error) { - return NoOpCounter, nil -} - -func (cv *counterVec) With(labels prometheus.Labels) prometheus.Counter { return NoOpCounter } - -// Observer - -type observer struct{} - -func (o *observer) Observe(float64) {} - -// ObserverVec - -type observerVec struct { - prometheus.Collector -} - -func (ov *observerVec) GetMetricWith(prometheus.Labels) (prometheus.Observer, error) { - return NoOpObserver, nil -} -func (ov *observerVec) GetMetricWithLabelValues(lvs ...string) (prometheus.Observer, error) { - return NoOpObserver, nil -} - -func (ov *observerVec) With(prometheus.Labels) prometheus.Observer { return NoOpObserver } -func (ov *observerVec) WithLabelValues(...string) prometheus.Observer { return NoOpObserver } - -func (ov *observerVec) CurryWith(prometheus.Labels) (prometheus.ObserverVec, error) { - return NoOpObserverVec, nil -} -func (ov *observerVec) MustCurryWith(prometheus.Labels) prometheus.ObserverVec { - return NoOpObserverVec -} - -// Gauge - -type gauge struct { - prometheus.Metric - prometheus.Collector -} - -func (g *gauge) Set(float64) {} -func (g *gauge) Inc() {} -func (g *gauge) Dec() {} -func (g *gauge) Add(float64) {} -func (g *gauge) Sub(float64) {} -func (g *gauge) SetToCurrentTime() {} - -// GaugeVec - -type gaugeVec struct { - prometheus.Collector -} - -func (gv *gaugeVec) WithLabelValues(lvls ...string) prometheus.Gauge { - return NoOpGauge -} diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/logging_hook.go b/vendor/github.com/cilium/cilium/pkg/metrics/logging_hook.go deleted file mode 100644 index acee49922..000000000 --- a/vendor/github.com/cilium/cilium/pkg/metrics/logging_hook.go +++ /dev/null @@ -1,72 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package metrics - -import ( - "fmt" - "reflect" - - "github.com/sirupsen/logrus" - - "github.com/cilium/cilium/pkg/components" - "github.com/cilium/cilium/pkg/logging/logfields" -) - -// LoggingHook is a hook for logrus which counts error and warning messages as a -// Prometheus metric. -type LoggingHook struct { - metric CounterVec -} - -// NewLoggingHook returns a new instance of LoggingHook for the given Cilium -// component. -func NewLoggingHook(component string) *LoggingHook { - // NOTE(mrostecki): For now errors and warning metric exists only for Cilium - // daemon, but support of Prometheus metrics in some other components (i.e. - // cilium-health - GH-4268) is planned. - - // Pick a metric for the component. - var metric CounterVec - switch component { - case components.CiliumAgentName: - metric = ErrorsWarnings - case components.CiliumOperatortName: - metric = ErrorsWarnings - default: - panic(fmt.Sprintf("component %s is unsupported by LoggingHook", component)) - } - - return &LoggingHook{metric: metric} -} - -// Levels returns the list of logging levels on which the hook is triggered. 
-func (h *LoggingHook) Levels() []logrus.Level { - return []logrus.Level{ - logrus.ErrorLevel, - logrus.WarnLevel, - } -} - -// Fire is the main method which is called every time when logger has an error -// or warning message. -func (h *LoggingHook) Fire(entry *logrus.Entry) error { - // Get information about subsystem from logging entry field. - iSubsystem, ok := entry.Data[logfields.LogSubsys] - if !ok { - serializedEntry, err := entry.String() - if err != nil { - return fmt.Errorf("log entry cannot be serialized and doesn't contain 'subsys' field") - } - return fmt.Errorf("log entry doesn't contain 'subsys' field: %s", serializedEntry) - } - subsystem, ok := iSubsystem.(string) - if !ok { - return fmt.Errorf("type of the 'subsystem' log entry field is not string but %s", reflect.TypeOf(iSubsystem)) - } - - // Increment the metric. - h.metric.WithLabelValues(entry.Level.String(), subsystem).Inc() - - return nil -} diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/metrics.go b/vendor/github.com/cilium/cilium/pkg/metrics/metrics.go deleted file mode 100644 index 84b487915..000000000 --- a/vendor/github.com/cilium/cilium/pkg/metrics/metrics.go +++ /dev/null @@ -1,1712 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -// Package metrics holds prometheus metrics objects and related utility functions. It -// does not abstract away the prometheus client but the caller rarely needs to -// refer to prometheus directly. -package metrics - -// Adding a metric -// - Add a metric object of the appropriate type as an exported variable -// - Register the new object in the init function - -import ( - "net/http" - - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/collectors" - "github.com/prometheus/client_golang/prometheus/promhttp" - dto "github.com/prometheus/client_model/go" - "github.com/sirupsen/logrus" - - "github.com/cilium/cilium/api/v1/models" - "github.com/cilium/cilium/pkg/version" -) - -const ( - // ErrorTimeout is the value used to notify timeout errors. - ErrorTimeout = "timeout" - - // ErrorProxy is the value used to notify errors on Proxy. - ErrorProxy = "proxy" - - // L7DNS is the value used to report DNS label on metrics - L7DNS = "dns" - - // SubsystemBPF is the subsystem to scope metrics related to the bpf syscalls. - SubsystemBPF = "bpf" - - // SubsystemDatapath is the subsystem to scope metrics related to management of - // the datapath. It is prepended to metric names and separated with a '_'. - SubsystemDatapath = "datapath" - - // SubsystemAgent is the subsystem to scope metrics related to the cilium agent itself. - SubsystemAgent = "agent" - - // SubsystemFQDN is the subsystem to scope metrics related to the FQDN proxy. - SubsystemIPCache = "ipcache" - - // SubsystemK8s is the subsystem to scope metrics related to Kubernetes - SubsystemK8s = "k8s" - - // SubsystemK8sClient is the subsystem to scope metrics related to the kubernetes client. - SubsystemK8sClient = "k8s_client" - - // SubsystemKVStore is the subsystem to scope metrics related to the kvstore. - SubsystemKVStore = "kvstore" - - // SubsystemFQDN is the subsystem to scope metrics related to the FQDN proxy. - SubsystemFQDN = "fqdn" - - // SubsystemNodes is the subsystem to scope metrics related to the node manager. - SubsystemNodes = "nodes" - - // SubsystemTriggers is the subsystem to scope metrics related to the trigger package. 
- SubsystemTriggers = "triggers" - - // SubsystemAPILimiter is the subsystem to scope metrics related to the API limiter package. - SubsystemAPILimiter = "api_limiter" - - // Namespace is used to scope metrics from cilium. It is prepended to metric - // names and separated with a '_' - Namespace = "cilium" - - // LabelError indicates the type of error (string) - LabelError = "error" - - // LabelOutcome indicates whether the outcome of the operation was successful or not - LabelOutcome = "outcome" - - // LabelAttempts is the number of attempts it took to complete the operation - LabelAttempts = "attempts" - - // Labels - - // LabelValueOutcomeSuccess is used as a successful outcome of an operation - LabelValueOutcomeSuccess = "success" - - // LabelValueOutcomeFail is used as an unsuccessful outcome of an operation - LabelValueOutcomeFail = "fail" - - // LabelEventSourceAPI marks event-related metrics that come from the API - LabelEventSourceAPI = "api" - - // LabelEventSourceK8s marks event-related metrics that come from k8s - LabelEventSourceK8s = "k8s" - - // LabelEventSourceFQDN marks event-related metrics that come from pkg/fqdn - LabelEventSourceFQDN = "fqdn" - - // LabelEventSourceContainerd marks event-related metrics that come from docker - LabelEventSourceContainerd = "docker" - - // LabelDatapathArea marks which area the metrics are related to (eg, which BPF map) - LabelDatapathArea = "area" - - // LabelDatapathName marks a unique identifier for this metric. - // The name should be defined once for a given type of error. - LabelDatapathName = "name" - - // LabelDatapathFamily marks which protocol family (IPv4, IPV6) the metric is related to. - LabelDatapathFamily = "family" - - // LabelProtocol marks the L4 protocol (TCP, ANY) for the metric. - LabelProtocol = "protocol" - - // LabelSignalType marks the signal name - LabelSignalType = "signal" - - // LabelSignalData marks the signal data - LabelSignalData = "data" - - // LabelStatus the label from completed task - LabelStatus = "status" - - // LabelPolicyEnforcement is the label used to see the enforcement status - LabelPolicyEnforcement = "enforcement" - - // LabelPolicySource is the label used to see the enforcement status - LabelPolicySource = "source" - - // LabelScope is the label used to defined multiples scopes in the same - // metric. For example, one counter may measure a metric over the scope of - // the entire event (scope=global), or just part of an event - // (scope=slow_path) - LabelScope = "scope" - - // LabelProtocolL7 is the label used when working with layer 7 protocols. - LabelProtocolL7 = "protocol_l7" - - // LabelBuildState is the state a build queue entry is in - LabelBuildState = "state" - - // LabelBuildQueueName is the name of the build queue - LabelBuildQueueName = "name" - - // LabelAction is the label used to defined what kind of action was performed in a metric - LabelAction = "action" - - // LabelSubsystem is the label used to refer to any of the child process - // started by cilium (Envoy, monitor, etc..) - LabelSubsystem = "subsystem" - - // LabelKind is the kind of a label - LabelKind = "kind" - - // LabelEventSource is the source of a label for event metrics - // i.e. k8s, containerd, api. 
- LabelEventSource = "source" - - // LabelPath is the label for the API path - LabelPath = "path" - // LabelMethod is the label for the HTTP method - LabelMethod = "method" - - // LabelAPIReturnCode is the HTTP code returned for that API path - LabelAPIReturnCode = "return_code" - - // LabelOperation is the label for BPF maps operations - LabelOperation = "operation" - - // LabelMapName is the label for the BPF map name - LabelMapName = "map_name" - - // LabelVersion is the label for the version number - LabelVersion = "version" - - // LabelVersionRevision is the label for the version revision - LabelVersionRevision = "revision" - - // LabelArch is the label for the platform architecture (e.g. linux/amd64) - LabelArch = "arch" - - // LabelDirection is the label for traffic direction - LabelDirection = "direction" - - // LabelSourceCluster is the label for source cluster name - LabelSourceCluster = "source_cluster" - - // LabelSourceNodeName is the label for source node name - LabelSourceNodeName = "source_node_name" - - // LabelTargetCluster is the label for target cluster name - LabelTargetCluster = "target_cluster" - - // LabelTargetNodeIP is the label for target node IP - LabelTargetNodeIP = "target_node_ip" - - // LabelTargetNodeName is the label for target node name - LabelTargetNodeName = "target_node_name" - - // LabelTargetNodeType is the label for target node type (local_node, remote_intra_cluster, vs remote_inter_cluster) - LabelTargetNodeType = "target_node_type" - - LabelLocationLocalNode = "local_node" - LabelLocationRemoteIntraCluster = "remote_intra_cluster" - LabelLocationRemoteInterCluster = "remote_inter_cluster" - - // LabelType is the label for type in general (e.g. endpoint, node) - LabelType = "type" - LabelPeerEndpoint = "endpoint" - LabelPeerNode = "node" - - LabelTrafficHTTP = "http" - LabelTrafficICMP = "icmp" - - LabelAddressType = "address_type" - LabelAddressTypePrimary = "primary" - LabelAddressTypeSecondary = "secondary" -) - -var ( - registry = prometheus.NewPedanticRegistry() - - // BootstrapTimes is the durations of cilium-agent bootstrap sequence. - BootstrapTimes = NoOpObserverVec - - // APIInteractions is the total time taken to process an API call made - // to the cilium-agent - APIInteractions = NoOpObserverVec - - // Status - - // NodeConnectivityStatus is the connectivity status between local node to - // other node intra or inter cluster. - NodeConnectivityStatus = NoOpGaugeVec - - // NodeConnectivityLatency is the connectivity latency between local node to - // other node intra or inter cluster. - NodeConnectivityLatency = NoOpGaugeVec - - // Endpoint - - // Endpoint is a function used to collect this metric. - // It must be thread-safe. - Endpoint prometheus.GaugeFunc - - // EndpointRegenerationTotal is a count of the number of times any endpoint - // has been regenerated and success/fail outcome - EndpointRegenerationTotal = NoOpCounterVec - - // EndpointStateCount is the total count of the endpoints in various states. - EndpointStateCount = NoOpGaugeVec - - // EndpointRegenerationTimeStats is the total time taken to regenerate - // endpoints, labeled by span name and status ("success" or "failure") - EndpointRegenerationTimeStats = NoOpObserverVec - - // EndpointPropagationDelay is the delay between creation of local CiliumEndpoint - // and update for that CiliumEndpoint received through CiliumEndpointSlice. - // Measure of local CEP roundtrip time with CiliumEndpointSlice feature enabled. 
- EndpointPropagationDelay = NoOpObserverVec - - // Policy - // Policy is the number of policies loaded into the agent - Policy = NoOpGauge - - // PolicyRegenerationCount is the total number of successful policy - // regenerations. - PolicyRegenerationCount = NoOpCounter - - // PolicyRegenerationTimeStats is the total time taken to generate policies - PolicyRegenerationTimeStats = NoOpObserverVec - - // PolicyRevision is the current policy revision number for this agent - PolicyRevision = NoOpGauge - - // PolicyImportErrorsTotal is a count of failed policy imports. - // This metric was deprecated in Cilium 1.14 and is to be removed in 1.15. - // It is replaced by the PolicyChangeTotal metric. - PolicyImportErrorsTotal = NoOpCounter - - // PolicyChangeTotal is a count of policy changes by outcome ("success" or - // "failure") - PolicyChangeTotal = NoOpCounterVec - - // PolicyEndpointStatus is the number of endpoints with policy labeled by enforcement type - PolicyEndpointStatus = NoOpGaugeVec - - // PolicyImplementationDelay is a distribution of times taken from adding a - // policy (and incrementing the policy revision) to seeing it in the datapath - // per Endpoint. This reflects the actual delay perceived by traffic flowing - // through the datapath. The longest times will roughly correlate with the - // time taken to fully deploy an endpoint. - PolicyImplementationDelay = NoOpObserverVec - - // Identity - - // Identity is the number of identities currently in use on the node by type - Identity = NoOpGaugeVec - - // Events - - // EventTS* is the time in seconds since epoch that we last received an - // event that we will handle - // source is one of k8s, docker or api - - // EventTS is the timestamp of k8s resource events. - EventTS = NoOpGaugeVec - - // EventLagK8s is the lag calculation for k8s Pod events.
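EventTS (and the per-source variants described above) is a labeled gauge holding the last time an event from a given source was handled. A small sketch of how such a gauge is typically updated; the label names mirror LabelEventSource, LabelScope and LabelAction from this file, while the concrete values are made up:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	eventTS := prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: "cilium",
		Name:      "event_ts",
		Help:      "Last timestamp when we received an event",
	}, []string{"source", "scope", "action"})
	prometheus.MustRegister(eventTS)

	// Stamp "now" whenever an event is handled, e.g. a k8s Pod update.
	eventTS.WithLabelValues("k8s", "Pod", "update").SetToCurrentTime()
}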
- EventLagK8s = NoOpGauge - - // L7 statistics - - // ProxyRedirects is the number of redirects labeled by protocol - ProxyRedirects = NoOpGaugeVec - - // ProxyPolicyL7Total is a count of all l7 requests handled by proxy - ProxyPolicyL7Total = NoOpCounterVec - - // ProxyParseErrors is a count of failed parse errors on proxy - // Deprecated: in favor of ProxyPolicyL7Total - ProxyParseErrors = NoOpCounter - - // ProxyForwarded is a count of all forwarded requests by proxy - // Deprecated: in favor of ProxyPolicyL7Total - ProxyForwarded = NoOpCounter - - // ProxyDenied is a count of all denied requests by policy by the proxy - // Deprecated: in favor of ProxyPolicyL7Total - ProxyDenied = NoOpCounter - - // ProxyReceived is a count of all received requests by the proxy - // Deprecated: in favor of ProxyPolicyL7Total - ProxyReceived = NoOpCounter - - // ProxyUpstreamTime is how long the upstream server took to reply labeled - // by error, protocol and span time - ProxyUpstreamTime = NoOpObserverVec - - // ProxyDatapathUpdateTimeout is a count of all the timeouts encountered while - // updating the datapath due to an FQDN IP update - ProxyDatapathUpdateTimeout = NoOpCounter - - // L3-L4 statistics - - // DropCount is the total drop requests, - // tagged by drop reason and direction(ingress/egress) - DropCount = NoOpCounterVec - - // DropBytes is the total dropped bytes, - // tagged by drop reason and direction(ingress/egress) - DropBytes = NoOpCounterVec - - // ForwardCount is the total forwarded packets, - // tagged by ingress/egress direction - ForwardCount = NoOpCounterVec - - // ForwardBytes is the total forwarded bytes, - // tagged by ingress/egress direction - ForwardBytes = NoOpCounterVec - - // Datapath statistics - - // ConntrackGCRuns is the number of times that the conntrack GC - // process was run. - ConntrackGCRuns = NoOpCounterVec - - // ConntrackGCKeyFallbacks number of times that the conntrack key fallback was invalid. - ConntrackGCKeyFallbacks = NoOpCounterVec - - // ConntrackGCSize the number of entries in the conntrack table - ConntrackGCSize = NoOpGaugeVec - - // NatGCSize the number of entries in the nat table - NatGCSize = NoOpGaugeVec - - // ConntrackGCDuration the duration of the conntrack GC process in milliseconds. - ConntrackGCDuration = NoOpObserverVec - - // ConntrackDumpReset marks the count for conntrack dump resets - ConntrackDumpResets = NoOpCounterVec - - // Signals - - // SignalsHandled is the number of signals received. - SignalsHandled = NoOpCounterVec - - // Services - - // ServicesCount number of services - ServicesCount = NoOpCounterVec - - // Errors and warnings - - // ErrorsWarnings is the number of errors and warnings in cilium-agent instances - ErrorsWarnings = NoOpCounterVec - - // ControllerRuns is the number of times that a controller process runs. - ControllerRuns = NoOpCounterVec - - // ControllerRunsDuration the duration of the controller process in seconds - ControllerRunsDuration = NoOpObserverVec - - // subprocess, labeled by Subsystem - SubprocessStart = NoOpCounterVec - - // Kubernetes Events - - // KubernetesEventProcessed is the number of Kubernetes events - // processed labeled by scope, action and execution result - KubernetesEventProcessed = NoOpCounterVec - - // KubernetesEventReceived is the number of Kubernetes events received - // labeled by scope, action, valid data and equalness. 
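The *Duration metrics above (ConntrackGCDuration, ControllerRunsDuration, ProxyUpstreamTime, ...) are histogram vectors fed with measured durations through Observe. A minimal sketch of that pattern; the metric and label names mirror the conntrack GC histogram registered later in this file, while the label values and the timing source (plain time.Since rather than Cilium's spanstat) are illustrative:

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func runGC() { time.Sleep(10 * time.Millisecond) } // stand-in for a conntrack GC pass

func main() {
	gcDuration := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Namespace: "cilium",
		Subsystem: "datapath",
		Name:      "conntrack_gc_duration_seconds",
		Help:      "Duration in seconds of the garbage collector process",
	}, []string{"family", "protocol", "status"})
	prometheus.MustRegister(gcDuration)

	start := time.Now()
	runGC()
	gcDuration.WithLabelValues("ipv4", "TCP", "completed").Observe(time.Since(start).Seconds())
}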
- KubernetesEventReceived = NoOpCounterVec - - // Kubernetes interactions - - // KubernetesAPIInteractions is the total time taken to process an API call made - // to the kube-apiserver - KubernetesAPIInteractions = NoOpObserverVec - - // KubernetesAPICallsTotal is the counter for all API calls made to - // kube-apiserver. - KubernetesAPICallsTotal = NoOpCounterVec - - // KubernetesCNPStatusCompletion is the number of seconds it takes to - // complete a CNP status update - KubernetesCNPStatusCompletion = NoOpObserverVec - - // TerminatingEndpointsEvents is the number of terminating endpoint events received from kubernetes. - TerminatingEndpointsEvents = NoOpCounter - - // IPAM events - - // IpamEvent is the number of IPAM events received labeled by action and - // datapath family type - IpamEvent = NoOpCounterVec - - // KVstore events - - // KVStoreOperationsDuration records the duration of kvstore operations - KVStoreOperationsDuration = NoOpObserverVec - - // KVStoreEventsQueueDuration records the duration in seconds of time - // received event was blocked before it could be queued - KVStoreEventsQueueDuration = NoOpObserverVec - - // KVStoreQuorumErrors records the number of kvstore quorum errors - KVStoreQuorumErrors = NoOpCounterVec - - // FQDNGarbageCollectorCleanedTotal is the number of domains cleaned by the - // GC job. - FQDNGarbageCollectorCleanedTotal = NoOpCounter - - // FQDNActiveNames is the number of domains inside the DNS cache that have - // not expired (by TTL), per endpoint. - FQDNActiveNames = NoOpGaugeVec - - // FQDNActiveIPs is the number of IPs inside the DNS cache associated with - // a domain that has not expired (by TTL) and are currently active, per - // endpoint. - FQDNActiveIPs = NoOpGaugeVec - - // FQDNAliveZombieConnections is the number IPs associated with domains - // that have expired (by TTL) yet still associated with an active - // connection (aka zombie), per endpoint. - FQDNAliveZombieConnections = NoOpGaugeVec - - // FQDNSemaphoreRejectedTotal is the total number of DNS requests rejected - // by the DNS proxy because too many requests were in flight, as enforced by - // the admission semaphore. - FQDNSemaphoreRejectedTotal = NoOpCounter - - // IPCacheErrorsTotal is the total number of IPCache events handled in - // the IPCache subsystem that resulted in errors. - IPCacheErrorsTotal = NoOpCounterVec - - // IPCacheEventsTotal is the total number of IPCache events handled in - // the IPCache subsystem. - IPCacheEventsTotal = NoOpCounterVec - - // BPFSyscallDuration is the metric for bpf syscalls duration. - BPFSyscallDuration = NoOpObserverVec - - // BPFMapOps is the metric to measure the number of operations done to a - // bpf map. 
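Counter vectors such as BPFMapOps, DropCount and KubernetesEventReceived are incremented with one label value per dimension, yielding one time series per label combination. A short sketch using the BPF map-ops labels (LabelMapName, LabelOperation, LabelOutcome); the map name and outcomes are made-up values:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

func main() {
	bpfMapOps := prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: "cilium",
		Subsystem: "bpf",
		Name:      "map_ops_total",
		Help:      "Total operations on map, tagged by map name",
	}, []string{"map_name", "operation", "outcome"})

	// One counter series per (map, operation, outcome) combination.
	bpfMapOps.WithLabelValues("cilium_lxc", "update", "success").Inc()
	bpfMapOps.WithLabelValues("cilium_lxc", "delete", "fail").Add(2)

	fmt.Println(testutil.ToFloat64(bpfMapOps.WithLabelValues("cilium_lxc", "delete", "fail"))) // 2
}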
- BPFMapOps = NoOpCounterVec - - // TriggerPolicyUpdateTotal is the metric to count total number of - // policy update triggers - TriggerPolicyUpdateTotal = NoOpCounterVec - - // TriggerPolicyUpdateFolds is the current level folding that is - // happening when running policy update triggers - TriggerPolicyUpdateFolds = NoOpGauge - - // TriggerPolicyUpdateCallDuration measures the latency and call - // duration of policy update triggers - TriggerPolicyUpdateCallDuration = NoOpObserverVec - - // VersionMetric labelled by Cilium version - VersionMetric = NoOpGaugeVec - - // APILimiterWaitHistoryDuration is a histogram that measures the - // individual wait durations of API limiters - APILimiterWaitHistoryDuration = NoOpObserverVec - - // APILimiterWaitDuration is the gauge of the current mean, min, and - // max wait duration - APILimiterWaitDuration = NoOpGaugeVec - - // APILimiterProcessingDuration is the gauge of the mean and estimated - // processing duration - APILimiterProcessingDuration = NoOpGaugeVec - - // APILimiterRequestsInFlight is the gauge of the current and max - // requests in flight - APILimiterRequestsInFlight = NoOpGaugeVec - - // APILimiterRateLimit is the gauge of the current rate limiting - // configuration including limit and burst - APILimiterRateLimit = NoOpGaugeVec - - // APILimiterAdjustmentFactor is the gauge representing the latest - // adjustment factor that was applied - APILimiterAdjustmentFactor = NoOpGaugeVec - - // APILimiterProcessedRequests is the counter of the number of - // processed (successful and failed) requests - APILimiterProcessedRequests = NoOpCounterVec -) - -type Configuration struct { - BootstrapTimesEnabled bool - APIInteractionsEnabled bool - NodeConnectivityStatusEnabled bool - NodeConnectivityLatencyEnabled bool - EndpointRegenerationCountEnabled bool - EndpointStateCountEnabled bool - EndpointRegenerationTimeStatsEnabled bool - EndpointPropagationDelayEnabled bool - PolicyCountEnabled bool - PolicyRegenerationCountEnabled bool - PolicyRegenerationTimeStatsEnabled bool - PolicyRevisionEnabled bool - PolicyImportErrorsEnabled bool - PolicyChangeTotalEnabled bool - PolicyEndpointStatusEnabled bool - PolicyImplementationDelayEnabled bool - IdentityCountEnabled bool - EventTSEnabled bool - EventLagK8sEnabled bool - EventTSContainerdEnabled bool - EventTSAPIEnabled bool - ProxyRedirectsEnabled bool - ProxyPolicyL7Enabled bool - ProxyParseErrorsEnabled bool - ProxyForwardedEnabled bool - ProxyDeniedEnabled bool - ProxyReceivedEnabled bool - ProxyDatapathUpdateTimeoutEnabled bool - NoOpObserverVecEnabled bool - DropCountEnabled bool - DropBytesEnabled bool - NoOpCounterVecEnabled bool - ForwardBytesEnabled bool - ConntrackGCRunsEnabled bool - ConntrackGCKeyFallbacksEnabled bool - ConntrackGCSizeEnabled bool - ConntrackGCDurationEnabled bool - ConntrackDumpResetsEnabled bool - SignalsHandledEnabled bool - ServicesCountEnabled bool - ErrorsWarningsEnabled bool - ControllerRunsEnabled bool - ControllerRunsDurationEnabled bool - SubprocessStartEnabled bool - KubernetesEventProcessedEnabled bool - KubernetesEventReceivedEnabled bool - KubernetesTimeBetweenEventsEnabled bool - KubernetesAPIInteractionsEnabled bool - KubernetesAPICallsEnabled bool - KubernetesCNPStatusCompletionEnabled bool - KubernetesTerminatingEndpointsEnabled bool - IpamEventEnabled bool - IPCacheErrorsTotalEnabled bool - IPCacheEventsTotalEnabled bool - KVStoreOperationsDurationEnabled bool - KVStoreEventsQueueDurationEnabled bool - KVStoreQuorumErrorsEnabled bool - 
FQDNGarbageCollectorCleanedTotalEnabled bool - FQDNActiveNames bool - FQDNActiveIPs bool - FQDNActiveZombiesConnections bool - FQDNSemaphoreRejectedTotal bool - BPFSyscallDurationEnabled bool - BPFMapOps bool - BPFMapPressure bool - TriggerPolicyUpdateTotal bool - TriggerPolicyUpdateFolds bool - TriggerPolicyUpdateCallDuration bool - VersionMetric bool - APILimiterWaitHistoryDuration bool - APILimiterWaitDuration bool - APILimiterProcessingDuration bool - APILimiterRequestsInFlight bool - APILimiterRateLimit bool - APILimiterAdjustmentFactor bool - APILimiterProcessedRequests bool -} - -func DefaultMetrics() map[string]struct{} { - return map[string]struct{}{ - Namespace + "_" + SubsystemAgent + "_bootstrap_seconds": {}, - Namespace + "_" + SubsystemAgent + "_api_process_time_seconds": {}, - Namespace + "_endpoint_regenerations_total": {}, - Namespace + "_endpoint_state": {}, - Namespace + "_endpoint_regeneration_time_stats_seconds": {}, - Namespace + "_policy": {}, - Namespace + "_policy_regeneration_total": {}, - Namespace + "_policy_regeneration_time_stats_seconds": {}, - Namespace + "_policy_max_revision": {}, - Namespace + "_policy_import_errors_total": {}, - Namespace + "_policy_change_total": {}, - Namespace + "_policy_endpoint_enforcement_status": {}, - Namespace + "_policy_implementation_delay": {}, - Namespace + "_identity": {}, - Namespace + "_event_ts": {}, - Namespace + "_proxy_redirects": {}, - Namespace + "_policy_l7_total": {}, - Namespace + "_policy_l7_parse_errors_total": {}, - Namespace + "_policy_l7_forwarded_total": {}, - Namespace + "_policy_l7_denied_total": {}, - Namespace + "_policy_l7_received_total": {}, - Namespace + "_proxy_upstream_reply_seconds": {}, - Namespace + "_drop_count_total": {}, - Namespace + "_drop_bytes_total": {}, - Namespace + "_forward_count_total": {}, - Namespace + "_forward_bytes_total": {}, - Namespace + "_endpoint_propagation_delay_seconds": {}, - Namespace + "_node_connectivity_status": {}, - Namespace + "_node_connectivity_latency_seconds": {}, - Namespace + "_" + SubsystemDatapath + "_conntrack_dump_resets_total": {}, - Namespace + "_" + SubsystemDatapath + "_conntrack_gc_runs_total": {}, - Namespace + "_" + SubsystemDatapath + "_conntrack_gc_key_fallbacks_total": {}, - Namespace + "_" + SubsystemDatapath + "_conntrack_gc_entries": {}, - Namespace + "_" + SubsystemDatapath + "_conntrack_gc_duration_seconds": {}, - Namespace + "_" + SubsystemDatapath + "_signals_handled_total": {}, - Namespace + "_services_events_total": {}, - Namespace + "_errors_warnings_total": {}, - Namespace + "_controllers_runs_total": {}, - Namespace + "_controllers_runs_duration_seconds": {}, - Namespace + "_subprocess_start_total": {}, - Namespace + "_kubernetes_events_total": {}, - Namespace + "_kubernetes_events_received_total": {}, - Namespace + "_" + SubsystemK8sClient + "_api_latency_time_seconds": {}, - Namespace + "_" + SubsystemK8sClient + "_api_calls_total": {}, - Namespace + "_" + SubsystemK8s + "_cnp_status_completion_seconds": {}, - Namespace + "_" + SubsystemK8s + "_terminating_endpoints_events_total": {}, - Namespace + "_ipam_events_total": {}, - Namespace + "_" + SubsystemKVStore + "_operations_duration_seconds": {}, - Namespace + "_" + SubsystemKVStore + "_events_queue_seconds": {}, - Namespace + "_" + SubsystemKVStore + "_quorum_errors_total": {}, - Namespace + "_" + SubsystemIPCache + "_errors_total": {}, - Namespace + "_" + SubsystemFQDN + "_gc_deletions_total": {}, - Namespace + "_" + SubsystemBPF + "_map_ops_total": {}, - Namespace + "_" + 
SubsystemTriggers + "_policy_update_total": {}, - Namespace + "_" + SubsystemTriggers + "_policy_update_folds": {}, - Namespace + "_" + SubsystemTriggers + "_policy_update_call_duration_seconds": {}, - Namespace + "_version": {}, - Namespace + "_" + SubsystemAPILimiter + "_wait_duration_seconds": {}, - Namespace + "_" + SubsystemAPILimiter + "_processing_duration_seconds": {}, - Namespace + "_" + SubsystemAPILimiter + "_requests_in_flight": {}, - Namespace + "_" + SubsystemAPILimiter + "_rate_limit": {}, - Namespace + "_" + SubsystemAPILimiter + "_adjustment_factor": {}, - Namespace + "_" + SubsystemAPILimiter + "_processed_requests_total": {}, - } -} - -// CreateConfiguration returns a Configuration with all metrics that are -// considered enabled from the given slice of metricsEnabled as well as a slice -// of prometheus.Collectors that must be registered in the prometheus default -// register. -func CreateConfiguration(metricsEnabled []string) (Configuration, []prometheus.Collector) { - var collectors []prometheus.Collector - c := Configuration{} - - for _, metricName := range metricsEnabled { - switch metricName { - default: - logrus.WithField("metric", metricName).Warning("Metric does not exist, skipping") - - case Namespace + "_" + SubsystemAgent + "_bootstrap_seconds": - BootstrapTimes = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: Namespace, - Subsystem: SubsystemAgent, - Name: "bootstrap_seconds", - Help: "Duration of bootstrap sequence", - }, []string{LabelScope, LabelOutcome}) - - collectors = append(collectors, BootstrapTimes) - c.BootstrapTimesEnabled = true - - case Namespace + "_" + SubsystemAgent + "_api_process_time_seconds": - APIInteractions = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: Namespace, - Subsystem: SubsystemAgent, - Name: "api_process_time_seconds", - Help: "Duration of processed API calls labeled by path, method and return code.", - }, []string{LabelPath, LabelMethod, LabelAPIReturnCode}) - - collectors = append(collectors, APIInteractions) - c.APIInteractionsEnabled = true - - case Namespace + "_endpoint_regenerations_total": - EndpointRegenerationTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: Namespace, - Name: "endpoint_regenerations_total", - Help: "Count of all endpoint regenerations that have completed, tagged by outcome", - }, []string{"outcome"}) - - collectors = append(collectors, EndpointRegenerationTotal) - c.EndpointRegenerationCountEnabled = true - - case Namespace + "_endpoint_state": - EndpointStateCount = prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: Namespace, - Name: "endpoint_state", - Help: "Count of all endpoints, tagged by different endpoint states", - }, - []string{"endpoint_state"}, - ) - - collectors = append(collectors, EndpointStateCount) - c.EndpointStateCountEnabled = true - - case Namespace + "_endpoint_regeneration_time_stats_seconds": - EndpointRegenerationTimeStats = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: Namespace, - Name: "endpoint_regeneration_time_stats_seconds", - Help: "Endpoint regeneration time stats labeled by the scope", - }, []string{LabelScope, LabelStatus}) - - collectors = append(collectors, EndpointRegenerationTimeStats) - c.EndpointRegenerationTimeStatsEnabled = true - - case Namespace + "_policy": - Policy = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: Namespace, - Name: "policy", - Help: "Number of policies currently loaded", - }) - - collectors = append(collectors, Policy) - 
c.PolicyCountEnabled = true - - case Namespace + "_policy_regeneration_total": - PolicyRegenerationCount = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: Namespace, - Name: "policy_regeneration_total", - Help: "Total number of successful policy regenerations", - }) - - collectors = append(collectors, PolicyRegenerationCount) - c.PolicyRegenerationCountEnabled = true - - case Namespace + "_policy_regeneration_time_stats_seconds": - PolicyRegenerationTimeStats = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: Namespace, - Name: "policy_regeneration_time_stats_seconds", - Help: "Policy regeneration time stats labeled by the scope", - }, []string{LabelScope, LabelStatus}) - - collectors = append(collectors, PolicyRegenerationTimeStats) - c.PolicyRegenerationTimeStatsEnabled = true - - case Namespace + "_policy_max_revision": - PolicyRevision = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: Namespace, - Name: "policy_max_revision", - Help: "Highest policy revision number in the agent", - }) - - collectors = append(collectors, PolicyRevision) - c.PolicyRegenerationTimeStatsEnabled = true - - case Namespace + "_policy_import_errors_total": - PolicyImportErrorsTotal = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: Namespace, - Name: "policy_import_errors_total", - Help: "Number of times a policy import has failed", - }) - - collectors = append(collectors, PolicyImportErrorsTotal) - c.PolicyImportErrorsEnabled = true - - case Namespace + "_policy_change_total": - PolicyChangeTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: Namespace, - Name: "policy_change_total", - Help: "Number of policy changes by outcome", - }, []string{"outcome"}) - - collectors = append(collectors, PolicyChangeTotal) - c.PolicyChangeTotalEnabled = true - - case Namespace + "_policy_endpoint_enforcement_status": - PolicyEndpointStatus = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: Namespace, - Name: "policy_endpoint_enforcement_status", - Help: "Number of endpoints labeled by policy enforcement status", - }, []string{LabelPolicyEnforcement}) - - collectors = append(collectors, PolicyEndpointStatus) - c.PolicyEndpointStatusEnabled = true - - case Namespace + "_policy_implementation_delay": - PolicyImplementationDelay = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: Namespace, - Name: "policy_implementation_delay", - Help: "Time between a policy change and it being fully deployed into the datapath", - }, []string{LabelPolicySource}) - - collectors = append(collectors, PolicyImplementationDelay) - c.PolicyImplementationDelayEnabled = true - - case Namespace + "_identity": - Identity = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: Namespace, - Name: "identity", - Help: "Number of identities currently allocated", - }, []string{LabelType}) - - collectors = append(collectors, Identity) - c.IdentityCountEnabled = true - - case Namespace + "_event_ts": - EventTS = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: Namespace, - Name: "event_ts", - Help: "Last timestamp when we received an event", - }, []string{LabelEventSource, LabelScope, LabelAction}) - - collectors = append(collectors, EventTS) - c.EventTSEnabled = true - - EventLagK8s = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: Namespace, - Name: "k8s_event_lag_seconds", - Help: "Lag for Kubernetes events - computed value between receiving a CNI ADD event from kubelet and a Pod event received from kube-api-server", - ConstLabels: 
prometheus.Labels{"source": LabelEventSourceK8s}, - }) - - collectors = append(collectors, EventLagK8s) - c.EventLagK8sEnabled = true - - case Namespace + "_proxy_redirects": - ProxyRedirects = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: Namespace, - Name: "proxy_redirects", - Help: "Number of redirects installed for endpoints, labeled by protocol", - }, []string{LabelProtocolL7}) - - collectors = append(collectors, ProxyRedirects) - c.ProxyRedirectsEnabled = true - - case Namespace + "_policy_l7_total": - ProxyPolicyL7Total = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: Namespace, - Name: "policy_l7_total", - Help: "Number of total proxy requests handled", - }, []string{"rule"}) - - collectors = append(collectors, ProxyPolicyL7Total) - c.ProxyPolicyL7Enabled = true - - case Namespace + "_policy_l7_parse_errors_total": - ProxyParseErrors = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: Namespace, - Name: "policy_l7_parse_errors_total", - Help: "Number of total L7 parse errors", - }) - - collectors = append(collectors, ProxyParseErrors) - c.ProxyParseErrorsEnabled = true - - case Namespace + "_policy_l7_forwarded_total": - ProxyForwarded = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: Namespace, - Name: "policy_l7_forwarded_total", - Help: "Number of total L7 forwarded requests/responses", - }) - - collectors = append(collectors, ProxyForwarded) - c.ProxyForwardedEnabled = true - - case Namespace + "_policy_l7_denied_total": - ProxyDenied = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: Namespace, - Name: "policy_l7_denied_total", - Help: "Number of total L7 denied requests/responses due to policy", - }) - - collectors = append(collectors, ProxyDenied) - c.ProxyDeniedEnabled = true - - case Namespace + "_policy_l7_received_total": - ProxyReceived = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: Namespace, - Name: "policy_l7_received_total", - Help: "Number of total L7 received requests/responses", - }) - - collectors = append(collectors, ProxyReceived) - c.ProxyReceivedEnabled = true - - case Namespace + "_proxy_upstream_reply_seconds": - ProxyUpstreamTime = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: Namespace, - Name: "proxy_upstream_reply_seconds", - Help: "Seconds waited to get a reply from a upstream server", - }, []string{"error", LabelProtocolL7, LabelScope}) - - collectors = append(collectors, ProxyUpstreamTime) - c.NoOpObserverVecEnabled = true - - case Namespace + "_proxy_datapath_update_timeout_total": - ProxyDatapathUpdateTimeout = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: Namespace, - Name: "proxy_datapath_update_timeout_total", - Help: "Number of total datapath update timeouts due to FQDN IP updates", - }) - - collectors = append(collectors, ProxyDatapathUpdateTimeout) - c.ProxyDatapathUpdateTimeoutEnabled = true - - case Namespace + "_drop_count_total": - DropCount = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: Namespace, - Name: "drop_count_total", - Help: "Total dropped packets, tagged by drop reason and ingress/egress direction", - }, - []string{"reason", LabelDirection}) - - collectors = append(collectors, DropCount) - c.DropCountEnabled = true - - case Namespace + "_drop_bytes_total": - DropBytes = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: Namespace, - Name: "drop_bytes_total", - Help: "Total dropped bytes, tagged by drop reason and ingress/egress direction", - }, - []string{"reason", LabelDirection}) - - collectors = 
append(collectors, DropBytes) - c.DropBytesEnabled = true - - case Namespace + "_forward_count_total": - ForwardCount = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: Namespace, - Name: "forward_count_total", - Help: "Total forwarded packets, tagged by ingress/egress direction", - }, - []string{LabelDirection}) - - collectors = append(collectors, ForwardCount) - c.NoOpCounterVecEnabled = true - - case Namespace + "_forward_bytes_total": - ForwardBytes = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: Namespace, - Name: "forward_bytes_total", - Help: "Total forwarded bytes, tagged by ingress/egress direction", - }, - []string{LabelDirection}) - - collectors = append(collectors, ForwardBytes) - c.ForwardBytesEnabled = true - - case Namespace + "_" + SubsystemDatapath + "_conntrack_gc_runs_total": - ConntrackGCRuns = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: Namespace, - Subsystem: SubsystemDatapath, - Name: "conntrack_gc_runs_total", - Help: "Number of times that the conntrack garbage collector process was run " + - "labeled by completion status", - }, []string{LabelDatapathFamily, LabelProtocol, LabelStatus}) - - collectors = append(collectors, ConntrackGCRuns) - c.ConntrackGCRunsEnabled = true - - case Namespace + "_" + SubsystemDatapath + "_conntrack_gc_key_fallbacks_total": - ConntrackGCKeyFallbacks = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: Namespace, - Subsystem: SubsystemDatapath, - Name: "conntrack_gc_key_fallbacks_total", - Help: "Number of times a key fallback was needed when iterating over the BPF map", - }, []string{LabelDatapathFamily, LabelProtocol}) - - collectors = append(collectors, ConntrackGCKeyFallbacks) - c.ConntrackGCKeyFallbacksEnabled = true - - case Namespace + "_" + SubsystemDatapath + "_conntrack_gc_entries": - ConntrackGCSize = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: Namespace, - Subsystem: SubsystemDatapath, - Name: "conntrack_gc_entries", - Help: "The number of alive and deleted conntrack entries at the end " + - "of a garbage collector run labeled by datapath family.", - }, []string{LabelDatapathFamily, LabelProtocol, LabelStatus}) - - collectors = append(collectors, ConntrackGCSize) - c.ConntrackGCSizeEnabled = true - - NatGCSize = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: Namespace, - Subsystem: SubsystemDatapath, - Name: "nat_gc_entries", - Help: "The number of alive and deleted nat entries at the end " + - "of a garbage collector run labeled by datapath family.", - }, []string{LabelDatapathFamily, LabelDirection, LabelStatus}) - - collectors = append(collectors, NatGCSize) - - case Namespace + "_" + SubsystemDatapath + "_conntrack_gc_duration_seconds": - ConntrackGCDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: Namespace, - Subsystem: SubsystemDatapath, - Name: "conntrack_gc_duration_seconds", - Help: "Duration in seconds of the garbage collector process " + - "labeled by datapath family and completion status", - }, []string{LabelDatapathFamily, LabelProtocol, LabelStatus}) - - collectors = append(collectors, ConntrackGCDuration) - c.ConntrackGCDurationEnabled = true - - case Namespace + "_" + SubsystemDatapath + "_conntrack_dump_resets_total": - ConntrackDumpResets = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: Namespace, - Subsystem: SubsystemDatapath, - Name: "conntrack_dump_resets_total", - Help: "Number of conntrack dump resets. 
Happens when a BPF entry gets removed while dumping the map is in progress", - }, []string{LabelDatapathArea, LabelDatapathName, LabelDatapathFamily}) - - collectors = append(collectors, ConntrackDumpResets) - c.ConntrackDumpResetsEnabled = true - - case Namespace + "_" + SubsystemDatapath + "_signals_handled_total": - SignalsHandled = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: Namespace, - Subsystem: SubsystemDatapath, - Name: "signals_handled_total", - Help: "Number of times that the datapath signal handler process was run " + - "labeled by signal type, data and completion status", - }, []string{LabelSignalType, LabelSignalData, LabelStatus}) - - collectors = append(collectors, SignalsHandled) - c.SignalsHandledEnabled = true - - case Namespace + "_services_events_total": - ServicesCount = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: Namespace, - Name: "services_events_total", - Help: "Number of services events labeled by action type", - }, []string{LabelAction}) - - collectors = append(collectors, ServicesCount) - c.ServicesCountEnabled = true - - case Namespace + "_errors_warnings_total": - ErrorsWarnings = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: Namespace, - Name: "errors_warnings_total", - Help: "Number of total errors in cilium-agent instances", - }, []string{"level", "subsystem"}) - - collectors = append(collectors, ErrorsWarnings) - c.ErrorsWarningsEnabled = true - - case Namespace + "_controllers_runs_total": - ControllerRuns = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: Namespace, - Name: "controllers_runs_total", - Help: "Number of times that a controller process was run labeled by completion status", - }, []string{LabelStatus}) - - collectors = append(collectors, ControllerRuns) - c.ControllerRunsEnabled = true - - case Namespace + "_controllers_runs_duration_seconds": - ControllerRunsDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: Namespace, - Name: "controllers_runs_duration_seconds", - Help: "Duration in seconds of the controller process labeled by completion status", - }, []string{LabelStatus}) - - collectors = append(collectors, ControllerRunsDuration) - c.ControllerRunsDurationEnabled = true - - case Namespace + "_subprocess_start_total": - SubprocessStart = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: Namespace, - Name: "subprocess_start_total", - Help: "Number of times that Cilium has started a subprocess, labeled by subsystem", - }, []string{LabelSubsystem}) - - collectors = append(collectors, SubprocessStart) - c.SubprocessStartEnabled = true - - case Namespace + "_kubernetes_events_total": - KubernetesEventProcessed = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: Namespace, - Name: "kubernetes_events_total", - Help: "Number of Kubernetes events processed labeled by scope, action and execution result", - }, []string{LabelScope, LabelAction, LabelStatus}) - - collectors = append(collectors, KubernetesEventProcessed) - c.KubernetesEventProcessedEnabled = true - - case Namespace + "_kubernetes_events_received_total": - KubernetesEventReceived = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: Namespace, - Name: "kubernetes_events_received_total", - Help: "Number of Kubernetes events received labeled by scope, action, valid data and equalness", - }, []string{LabelScope, LabelAction, "valid", "equal"}) - - collectors = append(collectors, KubernetesEventReceived) - c.KubernetesEventReceivedEnabled = true - - case Namespace + 
"_" + SubsystemK8sClient + "_api_latency_time_seconds": - KubernetesAPIInteractions = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: Namespace, - Subsystem: SubsystemK8sClient, - Name: "api_latency_time_seconds", - Help: "Duration of processed API calls labeled by path and method.", - }, []string{LabelPath, LabelMethod}) - - collectors = append(collectors, KubernetesAPIInteractions) - c.KubernetesAPIInteractionsEnabled = true - - case Namespace + "_" + SubsystemK8sClient + "_api_calls_total": - KubernetesAPICallsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: Namespace, - Subsystem: SubsystemK8sClient, - Name: "api_calls_total", - Help: "Number of API calls made to kube-apiserver labeled by host, method and return code.", - }, []string{"host", LabelMethod, LabelAPIReturnCode}) - - collectors = append(collectors, KubernetesAPICallsTotal) - c.KubernetesAPICallsEnabled = true - - case Namespace + "_" + SubsystemK8s + "_cnp_status_completion_seconds": - KubernetesCNPStatusCompletion = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: Namespace, - Subsystem: SubsystemK8s, - Name: "cnp_status_completion_seconds", - Help: "Duration in seconds in how long it took to complete a CNP status update", - }, []string{LabelAttempts, LabelOutcome}) - - collectors = append(collectors, KubernetesCNPStatusCompletion) - c.KubernetesCNPStatusCompletionEnabled = true - - case Namespace + "_" + SubsystemK8s + "_terminating_endpoints_events_total": - TerminatingEndpointsEvents = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: Namespace, - Subsystem: SubsystemK8s, - Name: "terminating_endpoints_events_total", - Help: "Number of terminating endpoint events received from Kubernetes", - }) - - collectors = append(collectors, TerminatingEndpointsEvents) - c.KubernetesTerminatingEndpointsEnabled = true - - case Namespace + "_ipam_events_total": - IpamEvent = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: Namespace, - Name: "ipam_events_total", - Help: "Number of IPAM events received labeled by action and datapath family type", - }, []string{LabelAction, LabelDatapathFamily}) - - collectors = append(collectors, IpamEvent) - c.IpamEventEnabled = true - - case Namespace + "_" + SubsystemKVStore + "_operations_duration_seconds": - KVStoreOperationsDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: Namespace, - Subsystem: SubsystemKVStore, - Name: "operations_duration_seconds", - Help: "Duration in seconds of kvstore operations", - }, []string{LabelScope, LabelKind, LabelAction, LabelOutcome}) - - collectors = append(collectors, KVStoreOperationsDuration) - c.KVStoreOperationsDurationEnabled = true - - case Namespace + "_" + SubsystemKVStore + "_events_queue_seconds": - KVStoreEventsQueueDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: Namespace, - Subsystem: SubsystemKVStore, - Name: "events_queue_seconds", - Help: "Duration in seconds of time received event was blocked before it could be queued", - Buckets: []float64{.002, .005, .01, .015, .025, .05, .1, .25, .5, .75, 1}, - }, []string{LabelScope, LabelAction}) - - collectors = append(collectors, KVStoreEventsQueueDuration) - c.KVStoreEventsQueueDurationEnabled = true - - case Namespace + "_" + SubsystemKVStore + "_quorum_errors_total": - KVStoreQuorumErrors = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: Namespace, - Subsystem: SubsystemKVStore, - Name: "quorum_errors_total", - Help: "Number of quorum errors", - }, 
[]string{LabelError}) - - collectors = append(collectors, KVStoreQuorumErrors) - c.KVStoreQuorumErrorsEnabled = true - - case Namespace + "_" + SubsystemIPCache + "_errors_total": - IPCacheErrorsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: Namespace, - Subsystem: SubsystemIPCache, - Name: "errors_total", - Help: "Number of errors interacting with the IP to Identity cache", - }, []string{LabelType, LabelError}) - - collectors = append(collectors, IPCacheErrorsTotal) - c.IPCacheErrorsTotalEnabled = true - - case Namespace + "_" + SubsystemIPCache + "_events_total": - IPCacheEventsTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: Namespace, - Subsystem: SubsystemIPCache, - Name: "events_total", - Help: "Number of events interacting with the IP to Identity cache", - }, []string{LabelType}) - - collectors = append(collectors, IPCacheEventsTotal) - c.IPCacheEventsTotalEnabled = true - - case Namespace + "_" + SubsystemFQDN + "_gc_deletions_total": - FQDNGarbageCollectorCleanedTotal = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: Namespace, - Subsystem: SubsystemFQDN, - Name: "gc_deletions_total", - Help: "Number of FQDNs that have been cleaned on FQDN Garbage collector job", - }) - - collectors = append(collectors, FQDNGarbageCollectorCleanedTotal) - c.FQDNGarbageCollectorCleanedTotalEnabled = true - - case Namespace + "_" + SubsystemFQDN + "_active_names": - FQDNActiveNames = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: Namespace, - Subsystem: SubsystemFQDN, - Name: "active_names", - Help: "Number of domains inside the DNS cache that have not expired (by TTL), per endpoint", - }, []string{LabelPeerEndpoint}) - - collectors = append(collectors, FQDNActiveNames) - c.FQDNActiveNames = true - - case Namespace + "_" + SubsystemFQDN + "_active_ips": - FQDNActiveIPs = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: Namespace, - Subsystem: SubsystemFQDN, - Name: "active_ips", - Help: "Number of IPs inside the DNS cache associated with a domain that has not expired (by TTL), per endpoint", - }, []string{LabelPeerEndpoint}) - - collectors = append(collectors, FQDNActiveIPs) - c.FQDNActiveIPs = true - - case Namespace + "_" + SubsystemFQDN + "_alive_zombie_connections": - FQDNAliveZombieConnections = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: Namespace, - Subsystem: SubsystemFQDN, - Name: "alive_zombie_connections", - Help: "Number of IPs associated with domains that have expired (by TTL) yet still associated with an active connection (aka zombie), per endpoint", - }, []string{LabelPeerEndpoint}) - - collectors = append(collectors, FQDNAliveZombieConnections) - c.FQDNActiveZombiesConnections = true - - case Namespace + "_" + SubsystemFQDN + "_semaphore_rejected_total": - FQDNSemaphoreRejectedTotal = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: Namespace, - Subsystem: SubsystemFQDN, - Name: "semaphore_rejected_total", - Help: "Number of DNS request rejected by the DNS Proxy's admission semaphore", - }) - - collectors = append(collectors, FQDNSemaphoreRejectedTotal) - c.FQDNSemaphoreRejectedTotal = true - - case Namespace + "_" + SubsystemBPF + "_syscall_duration_seconds": - BPFSyscallDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: Namespace, - Subsystem: SubsystemBPF, - Name: "syscall_duration_seconds", - Help: "Duration of BPF system calls", - }, []string{LabelOperation, LabelOutcome}) - - collectors = append(collectors, BPFSyscallDuration) - 
c.BPFSyscallDurationEnabled = true - - case Namespace + "_" + SubsystemBPF + "_map_ops_total": - BPFMapOps = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: Namespace, - Subsystem: SubsystemBPF, - Name: "map_ops_total", - Help: "Total operations on map, tagged by map name", - }, []string{LabelMapName, LabelOperation, LabelOutcome}) - - collectors = append(collectors, BPFMapOps) - c.BPFMapOps = true - - case Namespace + "_" + SubsystemBPF + "_map_pressure": - c.BPFMapPressure = true - - case Namespace + "_" + SubsystemTriggers + "_policy_update_total": - TriggerPolicyUpdateTotal = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: Namespace, - Subsystem: SubsystemTriggers, - Name: "policy_update_total", - Help: "Total number of policy update trigger invocations labeled by reason", - }, []string{"reason"}) - - collectors = append(collectors, TriggerPolicyUpdateTotal) - c.TriggerPolicyUpdateTotal = true - - case Namespace + "_" + SubsystemTriggers + "_policy_update_folds": - TriggerPolicyUpdateFolds = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: Namespace, - Subsystem: SubsystemTriggers, - Name: "policy_update_folds", - Help: "Current number of folds", - }) - - collectors = append(collectors, TriggerPolicyUpdateFolds) - c.TriggerPolicyUpdateFolds = true - - case Namespace + "_" + SubsystemTriggers + "_policy_update_call_duration_seconds": - TriggerPolicyUpdateCallDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: Namespace, - Subsystem: SubsystemTriggers, - Name: "policy_update_call_duration_seconds", - Help: "Duration of policy update trigger", - }, []string{LabelType}) - - collectors = append(collectors, TriggerPolicyUpdateCallDuration) - c.TriggerPolicyUpdateCallDuration = true - - case Namespace + "_version": - VersionMetric = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: Namespace, - Name: "version", - Help: "Cilium version", - }, []string{LabelVersion, LabelVersionRevision, LabelArch}) - - v := version.GetCiliumVersion() - VersionMetric.WithLabelValues(v.Version, v.Revision, v.Arch) - - collectors = append(collectors, VersionMetric) - c.VersionMetric = true - - case Namespace + "_" + SubsystemAPILimiter + "_wait_history_duration_seconds": - APILimiterWaitHistoryDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: Namespace, - Subsystem: SubsystemAPILimiter, - Name: "wait_history_duration_seconds", - Help: "Histogram over duration of waiting period for API calls subjects to rate limiting", - }, []string{"api_call"}) - - collectors = append(collectors, APILimiterWaitHistoryDuration) - c.APILimiterWaitHistoryDuration = true - - case Namespace + "_" + SubsystemAPILimiter + "_wait_duration_seconds": - APILimiterWaitDuration = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: Namespace, - Subsystem: SubsystemAPILimiter, - Name: "wait_duration_seconds", - Help: "Current wait time for api calls", - }, []string{"api_call", "value"}) - - collectors = append(collectors, APILimiterWaitDuration) - c.APILimiterWaitDuration = true - - case Namespace + "_" + SubsystemAPILimiter + "_processing_duration_seconds": - APILimiterProcessingDuration = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: Namespace, - Subsystem: SubsystemAPILimiter, - Name: "processing_duration_seconds", - Help: "Current processing time of api call", - }, []string{"api_call", "value"}) - - collectors = append(collectors, APILimiterProcessingDuration) - c.APILimiterProcessingDuration = true - - case Namespace + "_" + 
SubsystemAPILimiter + "_requests_in_flight": - APILimiterRequestsInFlight = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: Namespace, - Subsystem: SubsystemAPILimiter, - Name: "requests_in_flight", - Help: "Current requests in flight", - }, []string{"api_call", "value"}) - - collectors = append(collectors, APILimiterRequestsInFlight) - c.APILimiterRequestsInFlight = true - - case Namespace + "_" + SubsystemAPILimiter + "_rate_limit": - APILimiterRateLimit = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: Namespace, - Subsystem: SubsystemAPILimiter, - Name: "rate_limit", - Help: "Current rate limiting configuration", - }, []string{"api_call", "value"}) - - collectors = append(collectors, APILimiterRateLimit) - c.APILimiterRateLimit = true - - case Namespace + "_" + SubsystemAPILimiter + "_adjustment_factor": - APILimiterAdjustmentFactor = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: Namespace, - Subsystem: SubsystemAPILimiter, - Name: "adjustment_factor", - Help: "Current adjustment factor while auto adjusting", - }, []string{"api_call"}) - - collectors = append(collectors, APILimiterAdjustmentFactor) - c.APILimiterAdjustmentFactor = true - - case Namespace + "_" + SubsystemAPILimiter + "_processed_requests_total": - APILimiterProcessedRequests = prometheus.NewCounterVec(prometheus.CounterOpts{ - Namespace: Namespace, - Subsystem: SubsystemAPILimiter, - Name: "processed_requests_total", - Help: "Total number of API requests processed", - }, []string{"api_call", LabelOutcome}) - - collectors = append(collectors, APILimiterProcessedRequests) - c.APILimiterProcessedRequests = true - - case Namespace + "_endpoint_propagation_delay_seconds": - EndpointPropagationDelay = prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: Namespace, - Name: "endpoint_propagation_delay_seconds", - Help: "CiliumEndpoint roundtrip propagation delay in seconds", - Buckets: []float64{.05, .1, 1, 5, 30, 60, 120, 240, 300, 600}, - }, []string{}) - - collectors = append(collectors, EndpointPropagationDelay) - c.EndpointPropagationDelayEnabled = true - - case Namespace + "_node_connectivity_status": - NodeConnectivityStatus = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: Namespace, - Name: "node_connectivity_status", - Help: "The last observed status of both ICMP and HTTP connectivity between the current Cilium agent and other Cilium nodes", - }, []string{ - LabelSourceCluster, - LabelSourceNodeName, - LabelTargetCluster, - LabelTargetNodeName, - LabelTargetNodeType, - LabelType, - }) - - collectors = append(collectors, NodeConnectivityStatus) - c.NodeConnectivityStatusEnabled = true - - case Namespace + "_node_connectivity_latency_seconds": - NodeConnectivityLatency = prometheus.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: Namespace, - Name: "node_connectivity_latency_seconds", - Help: "The last observed latency between the current Cilium agent and other Cilium nodes in seconds", - }, []string{ - LabelSourceCluster, - LabelSourceNodeName, - LabelTargetCluster, - LabelTargetNodeName, - LabelTargetNodeIP, - LabelTargetNodeType, - LabelType, - LabelProtocol, - LabelAddressType, - }) - - collectors = append(collectors, NodeConnectivityLatency) - c.NodeConnectivityLatencyEnabled = true - } - - } - - return c, collectors -} - -// GaugeWithThreshold is a prometheus gauge that registers itself with -// prometheus if over a threshold value and unregisters when under. 
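The comment above describes a gauge that only shows up in scrape output while its value exceeds a threshold. A stripped-down, self-contained sketch of that register-above-threshold behavior against a throwaway registry; it is illustrative only, not the vendored implementation:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

type thresholdGauge struct {
	reg       *prometheus.Registry
	gauge     prometheus.Gauge
	threshold float64
	active    bool
}

func (g *thresholdGauge) Set(v float64) {
	over := v > g.threshold
	switch {
	case over && !g.active:
		g.active = g.reg.Register(g.gauge) == nil // start exporting the series
	case !over && g.active:
		g.active = !g.reg.Unregister(g.gauge) // drop it from scrape output again
	}
	g.gauge.Set(v)
}

func main() {
	reg := prometheus.NewPedanticRegistry()
	g := &thresholdGauge{
		reg:       reg,
		gauge:     prometheus.NewGauge(prometheus.GaugeOpts{Name: "example_map_pressure", Help: "Fill percentage"}),
		threshold: 0.9,
	}

	g.Set(0.95) // above threshold: registered
	mfs, _ := reg.Gather()
	fmt.Println(len(mfs)) // 1

	g.Set(0.5) // back under threshold: unregistered
	mfs, _ = reg.Gather()
	fmt.Println(len(mfs)) // 0
}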
-type GaugeWithThreshold struct { - gauge prometheus.Gauge - threshold float64 - active bool -} - -// Set the value of the GaugeWithThreshold. -func (gwt *GaugeWithThreshold) Set(value float64) { - overThreshold := value > gwt.threshold - if gwt.active && !overThreshold { - gwt.active = !Unregister(gwt.gauge) - if gwt.active { - logrus.WithField("metric", gwt.gauge.Desc().String()).Warning("Failed to unregister metric") - } - } else if !gwt.active && overThreshold { - err := Register(gwt.gauge) - gwt.active = err == nil - if err != nil { - logrus.WithField("metric", gwt.gauge.Desc().String()).WithError(err).Warning("Failed to register metric") - } - } - - gwt.gauge.Set(value) -} - -// NewGaugeWithThreshold creates a new GaugeWithThreshold. -func NewGaugeWithThreshold(name string, subsystem string, desc string, labels map[string]string, threshold float64) *GaugeWithThreshold { - return &GaugeWithThreshold{ - gauge: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: Namespace, - Subsystem: subsystem, - Name: name, - Help: desc, - ConstLabels: labels, - }), - threshold: threshold, - active: false, - } -} - -// NewBPFMapPressureGauge creates a new GaugeWithThreshold for the -// cilium_bpf_map_pressure metric with the map name as constant label. -func NewBPFMapPressureGauge(mapname string, threshold float64) *GaugeWithThreshold { - return NewGaugeWithThreshold( - "map_pressure", - SubsystemBPF, - "Fill percentage of map, tagged by map name", - map[string]string{ - LabelMapName: mapname, - }, - threshold, - ) -} - -func init() { - ResetMetrics() -} - -func registerDefaultMetrics() { - MustRegister(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{Namespace: Namespace})) - MustRegister(collectors.NewGoCollector()) - MustRegister(newStatusCollector()) - MustRegister(newbpfCollector()) -} - -func ResetMetrics() { - registry = prometheus.NewPedanticRegistry() - registerDefaultMetrics() -} - -// MustRegister adds the collector to the registry, exposing this metric to -// prometheus scrapes. -// It will panic on error. -func MustRegister(c ...prometheus.Collector) { - registry.MustRegister(c...) -} - -// Register registers a collector -func Register(c prometheus.Collector) error { - return registry.Register(c) -} - -// RegisterList registers a list of collectors. If registration of one -// collector fails, no collector is registered. -func RegisterList(list []prometheus.Collector) error { - registered := []prometheus.Collector{} - - for _, c := range list { - if err := Register(c); err != nil { - for _, c := range registered { - Unregister(c) - } - return err - } - - registered = append(registered, c) - } - - return nil -} - -// Unregister unregisters a collector -func Unregister(c prometheus.Collector) bool { - return registry.Unregister(c) -} - -// Enable begins serving prometheus metrics on the address passed in. Addresses -// of the form ":8080" will bind the port on all interfaces. -func Enable(addr string) <-chan error { - errs := make(chan error, 1) - - go func() { - // The Handler function provides a default handler to expose metrics - // via an HTTP server. "/metrics" is the usual endpoint for that. 
- mux := http.NewServeMux() - mux.Handle("/metrics", promhttp.HandlerFor(registry, promhttp.HandlerOpts{})) - srv := http.Server{ - Addr: addr, - Handler: mux, - } - errs <- srv.ListenAndServe() - }() - - return errs -} - -// GetCounterValue returns the current value -// stored for the counter -func GetCounterValue(m prometheus.Counter) float64 { - var pm dto.Metric - err := m.Write(&pm) - if err == nil { - return *pm.Counter.Value - } - return 0 -} - -// GetGaugeValue returns the current value stored for the gauge. This function -// is useful in tests. -func GetGaugeValue(m prometheus.Gauge) float64 { - var pm dto.Metric - err := m.Write(&pm) - if err == nil { - return *pm.Gauge.Value - } - return 0 -} - -// DumpMetrics gets the current Cilium metrics and dumps all into a -// models.Metrics structure.If metrics cannot be retrieved, returns an error -func DumpMetrics() ([]*models.Metric, error) { - result := []*models.Metric{} - currentMetrics, err := registry.Gather() - if err != nil { - return result, err - } - - for _, val := range currentMetrics { - - metricName := val.GetName() - metricType := val.GetType() - - for _, metricLabel := range val.Metric { - labels := map[string]string{} - for _, label := range metricLabel.GetLabel() { - labels[label.GetName()] = label.GetValue() - } - - var value float64 - switch metricType { - case dto.MetricType_COUNTER: - value = metricLabel.Counter.GetValue() - case dto.MetricType_GAUGE: - value = metricLabel.GetGauge().GetValue() - case dto.MetricType_UNTYPED: - value = metricLabel.GetUntyped().GetValue() - case dto.MetricType_SUMMARY: - value = metricLabel.GetSummary().GetSampleSum() - case dto.MetricType_HISTOGRAM: - value = metricLabel.GetHistogram().GetSampleSum() - default: - continue - } - - metric := &models.Metric{ - Name: metricName, - Labels: labels, - Value: value, - } - result = append(result, metric) - } - } - return result, nil -} - -// Error2Outcome converts an error to LabelOutcome -func Error2Outcome(err error) string { - if err != nil { - return LabelValueOutcomeFail - } - - return LabelValueOutcomeSuccess -} - -func BoolToFloat64(v bool) float64 { - if v { - return 1 - } - return 0 -} diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/metrics_unix.go b/vendor/github.com/cilium/cilium/pkg/metrics/metrics_unix.go deleted file mode 100644 index 9521c6d1a..000000000 --- a/vendor/github.com/cilium/cilium/pkg/metrics/metrics_unix.go +++ /dev/null @@ -1,17 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -//go:build !windows - -package metrics - -import "golang.org/x/sys/unix" - -// Errno2Outcome converts a unix.Errno to LabelOutcome -func Errno2Outcome(errno unix.Errno) string { - if errno != 0 { - return LabelValueOutcomeFail - } - - return LabelValueOutcomeSuccess -} diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/middleware.go b/vendor/github.com/cilium/cilium/pkg/metrics/middleware.go deleted file mode 100644 index 0ddb96039..000000000 --- a/vendor/github.com/cilium/cilium/pkg/metrics/middleware.go +++ /dev/null @@ -1,69 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package metrics - -import ( - "net/http" - "strconv" - "strings" - - "github.com/prometheus/client_golang/prometheus" - - "github.com/cilium/cilium/pkg/spanstat" -) - -// APIEventTSHelper is intended to be a global middleware to track metrics -// around API calls. -// It records the timestamp of an API call in the provided gauge. 
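Register/Unregister, Enable, GetCounterValue and DumpMetrics above are thin wrappers around a pedantic Prometheus registry. A compact sketch of the same mechanics using only the prometheus, promhttp and client_model packages; the port and metric name are placeholders:

package main

import (
	"fmt"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	dto "github.com/prometheus/client_model/go"
)

// counterValue reads a counter back the same way GetCounterValue above does:
// write the metric into a dto.Metric and take the counter value from it.
func counterValue(c prometheus.Counter) float64 {
	var pm dto.Metric
	if err := c.Write(&pm); err == nil {
		return pm.GetCounter().GetValue()
	}
	return 0
}

func main() {
	reg := prometheus.NewPedanticRegistry()
	requests := prometheus.NewCounter(prometheus.CounterOpts{Name: "example_requests_total", Help: "Example counter"})
	reg.MustRegister(requests)
	requests.Inc()
	fmt.Println(counterValue(requests)) // 1

	// Equivalent of Enable(":9090"): expose the registry over HTTP on /metrics.
	mux := http.NewServeMux()
	mux.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	_ = http.ListenAndServe(":9090", mux) // blocks; real code runs this in a goroutine
}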
-type APIEventTSHelper struct { - Next http.Handler - TSGauge GaugeVec - Histogram prometheus.ObserverVec -} - -type ResponderWrapper struct { - http.ResponseWriter - code int -} - -func (rw *ResponderWrapper) WriteHeader(code int) { - rw.code = code - rw.ResponseWriter.WriteHeader(code) -} - -// getShortPath returns the API path trimmed after the 3rd slash. -// examples: -// -// "/v1/config" -> "/v1/config" -// "/v1/endpoint/cilium-local:0" -> "/v1/endpoint" -// "/v1/endpoint/container-id:597.." -> "/v1/endpoint" -func getShortPath(s string) string { - var idxSum int - for nThSlash := 0; nThSlash < 3; nThSlash++ { - idx := strings.IndexByte(s[idxSum:], '/') - if idx == -1 { - return s - } - idxSum += idx + 1 - } - return s[:idxSum-1] -} - -// ServeHTTP implements the http.Handler interface. It records the timestamp -// this API call began at, then chains to the next handler. -func (m *APIEventTSHelper) ServeHTTP(r http.ResponseWriter, req *http.Request) { - reqOk := req != nil && req.URL != nil && req.URL.Path != "" - var path string - if reqOk { - path = getShortPath(req.URL.Path) - m.TSGauge.WithLabelValues(LabelEventSourceAPI, path, req.Method).SetToCurrentTime() - } - duration := spanstat.Start() - rw := &ResponderWrapper{ResponseWriter: r} - m.Next.ServeHTTP(rw, req) - if reqOk { - took := float64(duration.End(true).Total().Seconds()) - m.Histogram.WithLabelValues(path, req.Method, strconv.Itoa(rw.code)).Observe(took) - } -} diff --git a/vendor/github.com/cilium/cilium/pkg/metrics/status.go b/vendor/github.com/cilium/cilium/pkg/metrics/status.go deleted file mode 100644 index 98c4c9d9e..000000000 --- a/vendor/github.com/cilium/cilium/pkg/metrics/status.go +++ /dev/null @@ -1,162 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package metrics - -import ( - "github.com/prometheus/client_golang/prometheus" - "github.com/sirupsen/logrus" - - clientPkg "github.com/cilium/cilium/pkg/client" - healthClientPkg "github.com/cilium/cilium/pkg/health/client" -) - -type statusCollector struct { - daemonHealthGetter daemonHealthGetter - connectivityStatusGetter connectivityStatusGetter - - controllersFailingDesc *prometheus.Desc - ipAddressesDesc *prometheus.Desc - unreachableNodesDesc *prometheus.Desc - unreachableHealthEndpointsDesc *prometheus.Desc -} - -func newStatusCollector() *statusCollector { - ciliumClient, err := clientPkg.NewClient("") - if err != nil { - logrus.WithError(err).Fatal("Error while creating Cilium API client") - } - - healthClient, err := healthClientPkg.NewClient("") - if err != nil { - logrus.WithError(err).Fatal("Error while creating cilium-health API client") - } - - return newStatusCollectorWithClients(ciliumClient.Daemon, healthClient.Connectivity) -} - -// newStatusCollectorWithClients provides a constructor with injected clients -func newStatusCollectorWithClients(d daemonHealthGetter, c connectivityStatusGetter) *statusCollector { - return &statusCollector{ - daemonHealthGetter: d, - connectivityStatusGetter: c, - controllersFailingDesc: prometheus.NewDesc( - prometheus.BuildFQName(Namespace, "", "controllers_failing"), - "Number of failing controllers", - nil, nil, - ), - ipAddressesDesc: prometheus.NewDesc( - prometheus.BuildFQName(Namespace, "", "ip_addresses"), - "Number of allocated IP addresses", - []string{"family"}, nil, - ), - unreachableNodesDesc: prometheus.NewDesc( - prometheus.BuildFQName(Namespace, "", "unreachable_nodes"), - "Number of nodes that cannot be reached", - nil, nil, - ), - 
unreachableHealthEndpointsDesc: prometheus.NewDesc( - prometheus.BuildFQName(Namespace, "", "unreachable_health_endpoints"), - "Number of health endpoints that cannot be reached", - nil, nil, - ), - } -} - -func (s *statusCollector) Describe(ch chan<- *prometheus.Desc) { - ch <- s.controllersFailingDesc - ch <- s.ipAddressesDesc - ch <- s.unreachableNodesDesc - ch <- s.unreachableHealthEndpointsDesc -} - -func (s *statusCollector) Collect(ch chan<- prometheus.Metric) { - statusResponse, err := s.daemonHealthGetter.GetHealthz(nil) - if err != nil { - logrus.WithError(err).Error("Error while getting Cilium status") - return - } - - if statusResponse.Payload == nil { - return - } - - // Controllers failing - controllersFailing := 0 - - for _, ctrl := range statusResponse.Payload.Controllers { - if ctrl.Status == nil { - continue - } - if ctrl.Status.ConsecutiveFailureCount > 0 { - controllersFailing++ - } - } - - ch <- prometheus.MustNewConstMetric( - s.controllersFailingDesc, - prometheus.GaugeValue, - float64(controllersFailing), - ) - - if statusResponse.Payload.Ipam != nil { - // Address count - ch <- prometheus.MustNewConstMetric( - s.ipAddressesDesc, - prometheus.GaugeValue, - float64(len(statusResponse.Payload.Ipam.IPV4)), - "ipv4", - ) - - ch <- prometheus.MustNewConstMetric( - s.ipAddressesDesc, - prometheus.GaugeValue, - float64(len(statusResponse.Payload.Ipam.IPV6)), - "ipv6", - ) - } - - healthStatusResponse, err := s.connectivityStatusGetter.GetStatus(nil) - if err != nil { - logrus.WithError(err).Error("Error while getting cilium-health status") - return - } - - if healthStatusResponse.Payload == nil { - return - } - - // Nodes and endpoints healthStatusResponse - var ( - unreachableNodes int - unreachableEndpoints int - ) - - for _, nodeStatus := range healthStatusResponse.Payload.Nodes { - for _, addr := range healthClientPkg.GetAllHostAddresses(nodeStatus) { - if healthClientPkg.GetPathConnectivityStatusType(addr) == healthClientPkg.ConnStatusUnreachable { - unreachableNodes++ - break - } - } - - for _, addr := range healthClientPkg.GetAllEndpointAddresses(nodeStatus) { - if healthClientPkg.GetPathConnectivityStatusType(addr) == healthClientPkg.ConnStatusUnreachable { - unreachableEndpoints++ - break - } - } - } - - ch <- prometheus.MustNewConstMetric( - s.unreachableNodesDesc, - prometheus.GaugeValue, - float64(unreachableNodes), - ) - - ch <- prometheus.MustNewConstMetric( - s.unreachableHealthEndpointsDesc, - prometheus.GaugeValue, - float64(unreachableEndpoints), - ) -} diff --git a/vendor/github.com/cilium/cilium/pkg/monitor/api/drop.go b/vendor/github.com/cilium/cilium/pkg/monitor/api/drop.go index e44c2f282..5c64bf94d 100644 --- a/vendor/github.com/cilium/cilium/pkg/monitor/api/drop.go +++ b/vendor/github.com/cilium/cilium/pkg/monitor/api/drop.go @@ -88,6 +88,8 @@ var errors = map[uint8]string{ 190: "No conntrack map found", 191: "No nat map found", 192: "Invalid ClusterID", + 193: "Unsupported packet protocol for DSR encapsulation", + 194: "No egress gateway found", } func extendedReason(reason uint8, extError int8) string { diff --git a/vendor/github.com/cilium/cilium/pkg/monitor/api/types.go b/vendor/github.com/cilium/cilium/pkg/monitor/api/types.go index 2dba0d1af..94afb9492 100644 --- a/vendor/github.com/cilium/cilium/pkg/monitor/api/types.go +++ b/vendor/github.com/cilium/cilium/pkg/monitor/api/types.go @@ -7,6 +7,7 @@ import ( "encoding/json" "fmt" "net" + "sort" "strconv" "strings" "time" @@ -85,6 +86,21 @@ var ( } ) +// AllMessageTypeNames returns a slice of 
MessageTypeNames +func AllMessageTypeNames() []string { + names := make([]string, 0, len(MessageTypeNames)) + for name := range MessageTypeNames { + names = append(names, name) + } + + // Sort by the underlying MessageType + sort.SliceStable(names, func(i, j int) bool { + return MessageTypeNames[names[i]] < MessageTypeNames[names[j]] + }) + + return names +} + // MessageTypeName returns the name for a message type or the numeric value if // the name can't be found func MessageTypeName(typ int) string { diff --git a/vendor/github.com/cilium/cilium/pkg/option/config.go b/vendor/github.com/cilium/cilium/pkg/option/config.go index 30fd0f4d4..d665fb52e 100644 --- a/vendor/github.com/cilium/cilium/pkg/option/config.go +++ b/vendor/github.com/cilium/cilium/pkg/option/config.go @@ -19,7 +19,6 @@ import ( "strings" "time" - "github.com/prometheus/client_golang/prometheus" "github.com/shirou/gopsutil/v3/mem" "github.com/sirupsen/logrus" "github.com/spf13/cast" @@ -31,7 +30,6 @@ import ( "github.com/cilium/cilium/pkg/cidr" clustermeshTypes "github.com/cilium/cilium/pkg/clustermesh/types" "github.com/cilium/cilium/pkg/command" - "github.com/cilium/cilium/pkg/common" "github.com/cilium/cilium/pkg/defaults" "github.com/cilium/cilium/pkg/ip" ipamOption "github.com/cilium/cilium/pkg/ipam/option" @@ -39,7 +37,6 @@ import ( "github.com/cilium/cilium/pkg/logging" "github.com/cilium/cilium/pkg/logging/logfields" "github.com/cilium/cilium/pkg/mac" - "github.com/cilium/cilium/pkg/metrics" "github.com/cilium/cilium/pkg/version" ) @@ -169,6 +166,10 @@ const ( // ProxyMaxConnectionDuration specifies the max_connection_duration setting for the proxy in seconds ProxyMaxConnectionDuration = "proxy-max-connection-duration-seconds" + // ProxyIdleTimeout specifies the idle_timeout setting (in seconds), which applies + // for the connection from proxy to upstream cluster + ProxyIdleTimeout = "proxy-idle-timeout-seconds" + // FixedIdentityMapping is the key-value for the fixed identity mapping // which allows to use reserved label for fixed identities FixedIdentityMapping = "fixed-identity-mapping" @@ -198,6 +199,10 @@ const ( // should watch for. K8sWatcherEndpointSelector = "k8s-watcher-endpoint-selector" + // EnableK8s operation of Kubernetes-related services/controllers. + // Intended for operating cilium with CNI-compatible orchestrators other than Kubernetes. (default is true) + EnableK8s = "enable-k8s" + // K8sAPIServer is the kubernetes api address server (for https use --k8s-kubeconfig-path instead) K8sAPIServer = "k8s-api-server" @@ -307,6 +312,11 @@ const ( // conflicting marks. EnableIdentityMark = "enable-identity-mark" + // EnableHighScaleIPcache enables the special ipcache mode for high scale + // clusters. The ipcache content will be reduced to the strict minimum and + // traffic will be encapsulated to carry security identities. + EnableHighScaleIPcache = "enable-high-scale-ipcache" + // AddressScopeMax controls the maximum address scope for addresses to be // considered local ones with HOST_ID in the ipcache AddressScopeMax = "local-max-addr-scope" @@ -424,6 +434,9 @@ const ( // PrometheusServeAddr IP:Port on which to serve prometheus metrics (pass ":Port" to bind on all interfaces, "" is off) PrometheusServeAddr = "prometheus-serve-addr" + // ExternalEnvoyProxy defines whether the Envoy is deployed externally in form of a DaemonSet or not. 
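For context on the AllMessageTypeNames helper added to pkg/monitor/api above: it returns the known monitor message type names sorted by their numeric MessageType value, which gives callers a stable ordering for help text and flag completion. A minimal usage sketch follows; it is illustrative only and not part of this diff, and assumes nothing beyond the import path shown above.

package main

import (
	"fmt"
	"strings"

	monitorAPI "github.com/cilium/cilium/pkg/monitor/api"
)

func main() {
	// Names come back ordered by their underlying message type value,
	// so the printed list is stable across runs.
	fmt.Println(strings.Join(monitorAPI.AllMessageTypeNames(), ", "))
}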
+ ExternalEnvoyProxy = "external-envoy-proxy" + // CMDRef is the path to cmdref output directory CMDRef = "cmdref" @@ -498,6 +511,13 @@ const ( // TunnelName is the name of the Tunnel option TunnelName = "tunnel" + + // RoutingMode is the name of the option to choose between native routing and tunneling mode + RoutingMode = "routing-mode" + + // TunnelProtocol is the name of the option to select the tunneling protocol + TunnelProtocol = "tunnel-protocol" + // TunnelPortName is the name of the TunnelPort option TunnelPortName = "tunnel-port" @@ -528,12 +548,13 @@ const ( // ClusterIDName is the name of the ClusterID option ClusterIDName = "cluster-id" - // ClusterMeshConfigName is the name of the ClusterMeshConfig option - ClusterMeshConfigName = "clustermesh-config" - // CNIChainingMode configures which CNI plugin Cilium is chained with. CNIChainingMode = "cni-chaining-mode" + // CNIChainingTarget is the name of a CNI network in to which we should + // insert our plugin configuration + CNIChainingTarget = "cni-chaining-target" + // AuthMapEntriesMin defines the minimum auth map limit. AuthMapEntriesMin = 1 << 8 @@ -652,9 +673,6 @@ const ( // of MaxControllerInterval. MaxCtrlIntervalName = "max-controller-interval" - // SockopsEnableName is the name of the option to enable sockops - SockopsEnableName = "sockops-enable" - // K8sNamespaceName is the name of the K8sNamespace option K8sNamespaceName = "k8s-namespace" @@ -689,12 +707,6 @@ const ( // IPv6MCastDevice is the name of the option to select IPv6 multicast device IPv6MCastDevice = "ipv6-mcast-device" - // EnableMonitor is the name of the option to enable the monitor socket - EnableMonitorName = "enable-monitor" - - // MonitorQueueSizeName is the name of the option MonitorQueueSize - MonitorQueueSizeName = "monitor-queue-size" - // FQDNRejectResponseCode is the name for the option for dns-proxy reject response code FQDNRejectResponseCode = "tofqdns-dns-reject-response-code" @@ -730,12 +742,32 @@ const ( // EnableIPSecName is the name of the option to enable IPSec EnableIPSecName = "enable-ipsec" + // Duration of the IPsec key rotation. After that time, we will clean the + // previous IPsec key from the node. + IPsecKeyRotationDuration = "ipsec-key-rotation-duration" + + // Enable watcher for IPsec key. If disabled, a restart of the agent will + // be necessary on key rotations. + EnableIPsecKeyWatcher = "enable-ipsec-key-watcher" + // IPSecKeyFileName is the name of the option for ipsec key file IPSecKeyFileName = "ipsec-key-file" // EnableWireguard is the name of the option to enable wireguard EnableWireguard = "enable-wireguard" + // EnableL2Announcements is the name of the option to enable l2 announcements + EnableL2Announcements = "enable-l2-announcements" + + // L2AnnouncerLeaseDuration, if a lease has not been renewed for X amount of time, a new leader can be chosen. + L2AnnouncerLeaseDuration = "l2-announcements-lease-duration" + + // L2AnnouncerRenewDeadline, the leader will renew the lease every X amount of time. + L2AnnouncerRenewDeadline = "l2-announcements-renew-deadline" + + // L2AnnouncerRetryPeriod, on renew failure, retry after X amount of time. 
+ L2AnnouncerRetryPeriod = "l2-announcements-retry-period" + // EnableWireguardUserspaceFallback is the name of the option that enables the fallback to wireguard userspace mode EnableWireguardUserspaceFallback = "enable-wireguard-userspace-fallback" @@ -790,10 +822,6 @@ const ( // K8sEventHandover is the name of the K8sEventHandover option K8sEventHandover = "enable-k8s-event-handover" - // Metrics represents the metrics subsystem that Cilium should expose - // to prometheus. - Metrics = "metrics" - // LoopbackIPv4 is the address to use for service loopback SNAT LoopbackIPv4 = "ipv4-service-loopback-address" @@ -803,10 +831,6 @@ const ( // LocalRouterIPv6 is the link-local IPv6 address to use for Cilium router device LocalRouterIPv6 = "local-router-ipv6" - // ForceLocalPolicyEvalAtSource forces a policy decision at the source - // endpoint for all local communication - ForceLocalPolicyEvalAtSource = "force-local-policy-eval-at-source" - // EnableEndpointRoutes enables use of per endpoint routes EnableEndpointRoutes = "enable-endpoint-routes" @@ -827,6 +851,9 @@ const ( // IPAM is the IPAM method to use IPAM = "ipam" + // IPAMMultiPoolPreAllocation defines the pre-allocation value for each IPAM pool + IPAMMultiPoolPreAllocation = "ipam-multi-pool-pre-allocation" + // XDPModeNative for loading progs with XDPModeLinkDriver XDPModeNative = "native" @@ -972,6 +999,10 @@ const ( // HubbleSkipUnknownCGroupIDs specifies if events with unknown cgroup ids should be skipped HubbleSkipUnknownCGroupIDs = "hubble-skip-unknown-cgroup-ids" + // HubbleMonitorEvents specifies Cilium monitor events for Hubble to observe. + // By default, Hubble observes all monitor events. + HubbleMonitorEvents = "hubble-monitor-events" + // DisableIptablesFeederRules specifies which chains will be excluded // when installing the feeder rules DisableIptablesFeederRules = "disable-iptables-feeder-rules" @@ -1070,6 +1101,9 @@ const ( // EnableICMPRules enables ICMP-based rule support for Cilium Network Policies. EnableICMPRules = "enable-icmp-rules" + // Use the CiliumInternalIPs (vs. NodeInternalIPs) for IPsec encapsulation. + UseCiliumInternalIPForIPsec = "use-cilium-internal-ip-for-ipsec" + // BypassIPAvailabilityUponRestore bypasses the IP availability error // within IPAM upon endpoint restore and allows the use of the restored IP // regardless of whether it's available in the pool. @@ -1152,6 +1186,15 @@ const ( TunnelDisabled = "disabled" ) +// Available options for DaemonConfig.RoutingMode +const ( + // RoutingModeNative specifies native routing mode + RoutingModeNative = "native" + + // RoutingModeTunnel specifies tunneling mode + RoutingModeTunnel = "tunnel" +) + // Envoy option names const ( // HTTPNormalizePath switches on Envoy HTTP path normalization options, which currently @@ -1209,6 +1252,9 @@ const ( // EnableCiliumEndpointSlice enables the cilium endpoint slicing feature. EnableCiliumEndpointSlice = "enable-cilium-endpoint-slice" + + // EnableExternalWorkloads enables the support for external workloads. 
+ EnableExternalWorkloads = "enable-external-workloads" ) const ( @@ -1224,21 +1270,24 @@ const ( // NodePortModeDSR is for performing DSR for requests to remote nodes NodePortModeDSR = "dsr" + // NodePortModeHybrid is a dual mode of the above, that is, DSR for TCP and SNAT for UDP + NodePortModeHybrid = "hybrid" + // NodePortAlgRandom is for randomly selecting a backend NodePortAlgRandom = "random" // NodePortAlgMaglev is for using maglev consistent hashing for backend selection NodePortAlgMaglev = "maglev" - // NodePortModeHybrid is a dual mode of the above, that is, DSR for TCP and SNAT for UDP - NodePortModeHybrid = "hybrid" - // DSR dispatch mode to encode service into IP option or extension header DSRDispatchOption = "opt" // DSR dispatch mode to encapsulate to IPIP DSRDispatchIPIP = "ipip" + // DSR dispatch mode to encapsulate to Geneve + DSRDispatchGeneve = "geneve" + // DSR L4 translation to frontend port DSRL4XlateFrontend = "frontend" @@ -1314,9 +1363,16 @@ func LogRegisteredOptions(vp *viper.Viper, entry *logrus.Entry) { keys := vp.AllKeys() sort.Strings(keys) for _, k := range keys { - v := vp.GetStringSlice(k) - if len(v) > 0 { - entry.Infof(" --%s='%s'", k, strings.Join(v, ",")) + ss := vp.GetStringSlice(k) + if len(ss) == 0 { + sm := vp.GetStringMap(k) + for k, v := range sm { + ss = append(ss, fmt.Sprintf("%s=%s", k, v)) + } + } + + if len(ss) > 0 { + entry.Infof(" --%s='%s'", k, strings.Join(ss, ",")) } else { entry.Infof(" --%s='%s'", k, vp.GetString(k)) } @@ -1329,6 +1385,7 @@ type DaemonConfig struct { BpfDir string // BPF template files directory LibDir string // Cilium library files directory RunDir string // Cilium runtime directory + ExternalEnvoyProxy bool // Whether Envoy is deployed as external DaemonSet or not devicesMu lock.RWMutex // Protects devices devices []string // bpf_host device DirectRoutingDevice string // Direct routing device (used by BPF NodePort and BPF Host Routing) @@ -1345,9 +1402,11 @@ type DaemonConfig struct { // devices. EnableRuntimeDeviceDetection bool - DatapathMode string // Datapath mode - Tunnel string // Tunnel mode - TunnelPort int // Tunnel port + DatapathMode string // Datapath mode + Tunnel string // Tunnel mode + RoutingMode string // Routing mode + TunnelProtocol string // Tunneling protocol + TunnelPort int // Tunnel port DryMode bool // Do not create BPF maps, devices, .. @@ -1431,9 +1490,6 @@ type DaemonConfig struct { // ClusterID is the unique identifier of the cluster ClusterID uint32 - // ClusterMeshConfig is the path to the clustermesh configuration directory - ClusterMeshConfig string - // CTMapEntriesGlobalTCP is the maximum number of conntrack entries // allowed in each TCP CT table for IPv4/IPv6. CTMapEntriesGlobalTCP int @@ -1451,9 +1507,6 @@ type DaemonConfig struct { CTMapEntriesTimeoutSYN time.Duration CTMapEntriesTimeoutFIN time.Duration - // EnableMonitor enables the monitor unix domain socket server - EnableMonitor bool - // MonitorAggregationInterval configures the interval between monitor // messages when monitor aggregation is enabled. MonitorAggregationInterval time.Duration @@ -1486,10 +1539,6 @@ type DaemonConfig struct { // allowed in the BPF rev nat table SockRevNatEntries int - // EgressGatewayPolicyMapEntries is the maximum number of entries - // allowed in the BPF egress gateway policy map. 
- EgressGatewayPolicyMapEntries int - // DisableCiliumEndpointCRD disables the use of CiliumEndpoint CRD DisableCiliumEndpointCRD bool @@ -1550,13 +1599,14 @@ type DaemonConfig struct { // ProxyMaxConnectionDuration specifies the max_connection_duration setting for the proxy ProxyMaxConnectionDuration time.Duration + // ProxyIdleTimeout specifies the idle_timeout setting (in seconds), which applies + // for the connection from proxy to upstream cluster + ProxyIdleTimeout time.Duration + // EnvoyLogPath specifies where to store the Envoy proxy logs when Envoy // runs in the same container as Cilium. EnvoyLogPath string - // EnableSockOps specifies whether to enable sockops (socket lookup). - SockopsEnable bool - ProcFs string // PrependIptablesChains is the name of the option to enable prepending @@ -1621,12 +1671,30 @@ type DaemonConfig struct { // IPSec key file for stored keys IPSecKeyFile string + // Duration of the IPsec key rotation. After that time, we will clean the + // previous IPsec key from the node. + IPsecKeyRotationDuration time.Duration + + // Enable watcher for IPsec key. If disabled, a restart of the agent will + // be necessary on key rotations. + EnableIPsecKeyWatcher bool + // EnableWireguard enables Wireguard encryption EnableWireguard bool // EnableWireguardUserspaceFallback enables the fallback to the userspace implementation EnableWireguardUserspaceFallback bool + // EnableL2Announcements enables L2 announcement of service IPs + EnableL2Announcements bool + + // L2AnnouncerLeaseDuration, if a lease has not been renewed for X amount of time, a new leader can be chosen. + L2AnnouncerLeaseDuration time.Duration + // L2AnnouncerRenewDeadline, the leader will renew the lease every X amount of time. + L2AnnouncerRenewDeadline time.Duration + // L2AnnouncerRetryPeriod, on renew failure, retry after X amount of time. + L2AnnouncerRetryPeriod time.Duration + // NodeEncryptionOptOutLabels contains the label selectors for nodes opting out of // node-to-node encryption // This field ignored when marshalling to JSON in DaemonConfig.StoreInFile, @@ -1637,9 +1705,6 @@ type DaemonConfig struct { // the label selector in the above field. NodeEncryptionOptOutLabelsString string - // MonitorQueueSize is the size of the monitor event queue - MonitorQueueSize int - // CLI options BPFRoot string @@ -1850,9 +1915,6 @@ type DaemonConfig struct { // clusters. K8sEventHandover bool - // MetricsConfig is the configuration set in metrics - MetricsConfig metrics.Configuration - // LoopbackIPv4 is the address to use for service loopback SNAT LoopbackIPv4 string @@ -1862,37 +1924,12 @@ type DaemonConfig struct { // LocalRouterIPv6 is the link-local IPv6 address used for Cilium's router device LocalRouterIPv6 string - // ForceLocalPolicyEvalAtSource forces a policy decision at the source - // endpoint for all local communication - ForceLocalPolicyEvalAtSource bool - // EnableEndpointRoutes enables use of per endpoint routes EnableEndpointRoutes bool // Specifies wheather to annotate the kubernetes nodes or not AnnotateK8sNode bool - // RunMonitorAgent indicates whether to run the monitor agent - RunMonitorAgent bool - - // ReadCNIConfiguration reads the CNI configuration file and extracts - // Cilium relevant information. This can be used to pass per node - // configuration to Cilium. - ReadCNIConfiguration string - - // WriteCNIConfigurationWhenReady writes the CNI configuration to the - // specified location once the agent is ready to serve requests. 
This - // allows to keep a Kubernetes node NotReady until Cilium is up and - // running and able to schedule endpoints. - WriteCNIConfigurationWhenReady string - - // CNIExclusive, if true, directs the agent to remove all other CNI configuration files - CNIExclusive bool - - // CNILogFile is a path on disk (on the host) for the CNI plugin binary to use - // for logging. - CNILogFile string - // EnableNodePort enables k8s NodePort service implementation in BPF EnableNodePort bool @@ -2019,6 +2056,11 @@ type DaemonConfig struct { // conflicting marks. EnableIdentityMark bool + // EnableHighScaleIPcache enables the special ipcache mode for high scale + // clusters. The ipcache content will be reduced to the strict minimum and + // traffic will be encapsulated to carry security identities. + EnableHighScaleIPcache bool + // KernelHz is the HZ rate the kernel is operating in KernelHz int @@ -2035,8 +2077,8 @@ type DaemonConfig struct { // IPAM is the IPAM method to use IPAM string - // Enable chaining with another CNI plugin. - CNIChainingMode string + // IPAMMultiPoolPreAllocation defines the pre-allocation value for each IPAM pool + IPAMMultiPoolPreAllocation map[string]string // AutoCreateCiliumNodeResource enables automatic creation of a // CiliumNode resource for the local node @@ -2155,6 +2197,10 @@ type DaemonConfig struct { // HubbleSkipUnknownCGroupIDs specifies if events with unknown cgroup ids should be skipped HubbleSkipUnknownCGroupIDs bool + // HubbleMonitorEvents specifies Cilium monitor events for Hubble to observe. + // By default, Hubble observes all monitor events. + HubbleMonitorEvents []string + // EndpointStatus enables population of information in the // CiliumEndpoint.Status resource EndpointStatus map[string]struct{} @@ -2273,6 +2319,9 @@ type DaemonConfig struct { // EnableICMPRules enables ICMP-based rule support for Cilium Network Policies. EnableICMPRules bool + // Use the CiliumInternalIPs (vs. NodeInternalIPs) for IPsec encapsulation. + UseCiliumInternalIPForIPsec bool + // BypassIPAvailabilityUponRestore bypasses the IP availability error // within IPAM upon endpoint restore and allows the use of the restored IP // regardless of whether it's available in the pool. @@ -2355,7 +2404,6 @@ var ( KVStoreOpt: make(map[string]string), LogOpt: make(map[string]string), LoopbackIPv4: defaults.LoopbackIPv4, - ForceLocalPolicyEvalAtSource: defaults.ForceLocalPolicyEvalAtSource, EnableEndpointRoutes: defaults.EnableEndpointRoutes, AnnotateK8sNode: defaults.AnnotateK8sNode, K8sServiceCacheSize: defaults.K8sServiceCacheSize, @@ -2366,6 +2414,7 @@ var ( K8sEnableK8sEndpointSlice: defaults.K8sEnableEndpointSlice, AllocatorListTimeout: defaults.AllocatorListTimeout, EnableICMPRules: defaults.EnableICMPRules, + UseCiliumInternalIPForIPsec: defaults.UseCiliumInternalIPForIPsec, K8sEnableLeasesFallbackDiscovery: defaults.K8sEnableLeasesFallbackDiscovery, APIRateLimit: make(map[string]string), @@ -2469,15 +2518,19 @@ func (c *DaemonConfig) AlwaysAllowLocalhost() bool { } } -// TunnelingEnabled returns true if tunneling is enabled, i.e. not set to "disabled". +// TunnelingEnabled returns true if tunneling is enabled. func (c *DaemonConfig) TunnelingEnabled() bool { - return c.Tunnel != TunnelDisabled + // We check if routing mode is not native rather than checking if it's + // tunneling because, in unit tests, RoutingMode is usually not set and we + // would like for TunnelingEnabled to default to the actual default + // (tunneling is enabled) in that case. 
+ return c.RoutingMode != RoutingModeNative } // TunnelDevice returns cilium_{vxlan,geneve} depending on the config or "" if disabled. func (c *DaemonConfig) TunnelDevice() string { if c.TunnelingEnabled() { - return fmt.Sprintf("cilium_%s", c.Tunnel) + return fmt.Sprintf("cilium_%s", c.TunnelProtocol) } else { return "" } @@ -2486,14 +2539,19 @@ func (c *DaemonConfig) TunnelDevice() string { // TunnelExists returns true if some traffic may go through a tunnel, including // if the primary mode is native routing. For example, in the egress gateway, // we may send such traffic to a gateway node via a tunnel. +// In conjunction with the DSR Geneve and the direct routing, traffic from +// intermediate nodes to backend pods go through a tunnel, but the datapath logic +// takes care of the MTU overhead. So no need to take it into account here. +// See encap_geneve_dsr_opt[4,6] in nodeport.h func (c *DaemonConfig) TunnelExists() bool { - return c.TunnelingEnabled() || c.EnableIPv4EgressGateway + return c.TunnelingEnabled() || c.EnableIPv4EgressGateway || c.EnableHighScaleIPcache } // AreDevicesRequired returns true if the agent needs to attach to the native // devices to implement some features. func (c *DaemonConfig) AreDevicesRequired() bool { - return c.EnableNodePort || c.EnableHostFirewall || c.EnableBandwidthManager || c.EnableWireguard + return c.EnableNodePort || c.EnableHostFirewall || c.EnableBandwidthManager || + c.EnableWireguard || c.EnableHighScaleIPcache || c.EnableL2Announcements } // MasqueradingEnabled returns true if either IPv4 or IPv6 masquerading is enabled. @@ -2522,7 +2580,7 @@ func (c *DaemonConfig) IptablesMasqueradingEnabled() bool { // NodeIpsetNeeded returns true if a node ipsets should be used to skip // masquerading for traffic to cluster nodes. func (c *DaemonConfig) NodeIpsetNeeded() bool { - return c.Tunnel == TunnelDisabled && c.IptablesMasqueradingEnabled() + return !c.TunnelingEnabled() && c.IptablesMasqueradingEnabled() } // RemoteNodeIdentitiesEnabled returns true if the remote-node identity feature @@ -2643,6 +2701,12 @@ func (c *DaemonConfig) DirectRoutingDeviceRequired() bool { // BPF NodePort and BPF Host Routing are using the direct routing device now. // When tunneling is enabled, node-to-node redirection will be done by tunneling. 
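The hunks around TunnelingEnabled, TunnelExists and NodeIpsetNeeded reflect the migration from the single --tunnel option to the --routing-mode / --tunnel-protocol pair. The sketch below only restates the mapping that the Populate changes further down in this file apply to the legacy value; the helper itself is hypothetical, and the defaults (tunnel routing over VXLAN) are assumptions based on this diff rather than a definitive statement of the agent's behaviour.

package main

import "fmt"

// legacyTunnelToRouting is a hypothetical helper restating how the deprecated
// --tunnel value is translated in Populate within this diff.
func legacyTunnelToRouting(tunnel string) (routingMode, tunnelProtocol string) {
	routingMode, tunnelProtocol = "tunnel", "vxlan" // assumed defaults
	switch tunnel {
	case "disabled":
		routingMode = "native" // --tunnel=disabled becomes --routing-mode=native
	case "vxlan", "geneve":
		tunnelProtocol = tunnel // protocol carries over, routing mode stays "tunnel"
	}
	return routingMode, tunnelProtocol
}

func main() {
	fmt.Println(legacyTunnelToRouting("disabled")) // native vxlan
	fmt.Println(legacyTunnelToRouting("geneve"))   // tunnel geneve
}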
BPFHostRoutingEnabled := !c.EnableHostLegacyRouting + + // XDP needs IPV4_DIRECT_ROUTING when building tunnel headers: + if c.EnableNodePort && c.NodePortAcceleration != NodePortAccelerationDisabled { + return true + } + return (c.EnableNodePort || BPFHostRoutingEnabled || Config.EnableWireguard) && !c.TunnelingEnabled() } @@ -2707,15 +2771,22 @@ func (c *DaemonConfig) Validate(vp *viper.Viper) error { } } - switch c.Tunnel { - case TunnelVXLAN, TunnelGeneve, "": - case TunnelDisabled: - if c.UseSingleClusterRoute { - return fmt.Errorf("option --%s cannot be used in combination with --%s=%s", - SingleClusterRouteName, TunnelName, TunnelDisabled) - } + switch c.RoutingMode { + case RoutingModeNative, RoutingModeTunnel: + default: + return fmt.Errorf("invalid routing mode %q, valid modes = {%q, %q}", + c.RoutingMode, RoutingModeTunnel, RoutingModeNative) + } + + switch c.TunnelProtocol { + case TunnelVXLAN, TunnelGeneve: default: - return fmt.Errorf("invalid tunnel mode '%s', valid modes = {%s}", c.Tunnel, GetTunnelModes()) + return fmt.Errorf("invalid tunnel protocol %q", c.TunnelProtocol) + } + + if c.RoutingMode == RoutingModeNative && c.UseSingleClusterRoute { + return fmt.Errorf("option --%s cannot be used in combination with --%s=%s", + SingleClusterRouteName, RoutingMode, RoutingModeNative) } if c.ClusterID < clustermeshTypes.ClusterIDMin || c.ClusterID > clustermeshTypes.ClusterIDMax { @@ -2876,8 +2947,6 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.CGroupRoot = vp.GetString(CGroupRoot) c.ClusterID = vp.GetUint32(ClusterIDName) c.ClusterName = vp.GetString(ClusterName) - c.ClusterMeshConfig = vp.GetString(ClusterMeshConfigName) - c.CNIChainingMode = vp.GetString(CNIChainingMode) c.DatapathMode = vp.GetString(DatapathMode) c.Debug = vp.GetBool(DebugArg) c.DebugVerbose = vp.GetStringSlice(DebugVerbose) @@ -2893,6 +2962,10 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.IPv6MCastDevice = vp.GetString(IPv6MCastDevice) c.EnableIPSec = vp.GetBool(EnableIPSecName) c.EnableWireguard = vp.GetBool(EnableWireguard) + c.EnableL2Announcements = vp.GetBool(EnableL2Announcements) + c.L2AnnouncerLeaseDuration = vp.GetDuration(L2AnnouncerLeaseDuration) + c.L2AnnouncerRenewDeadline = vp.GetDuration(L2AnnouncerRenewDeadline) + c.L2AnnouncerRetryPeriod = vp.GetDuration(L2AnnouncerRetryPeriod) c.EnableWireguardUserspaceFallback = vp.GetBool(EnableWireguardUserspaceFallback) c.EnableWellKnownIdentities = vp.GetBool(EnableWellKnownIdentities) c.EnableXDPPrefilter = vp.GetBool(EnableXDPPrefilter) @@ -2936,7 +3009,6 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.EncryptInterface = vp.GetStringSlice(EncryptInterface) c.EncryptNode = vp.GetBool(EncryptNode) c.EnvoyLogPath = vp.GetString(EnvoyLog) - c.ForceLocalPolicyEvalAtSource = vp.GetBool(ForceLocalPolicyEvalAtSource) c.HTTPNormalizePath = vp.GetBool(HTTPNormalizePath) c.HTTPIdleTimeout = vp.GetInt(HTTPIdleTimeout) c.HTTPMaxGRPCTimeout = vp.GetInt(HTTPMaxGRPCTimeout) @@ -2982,7 +3054,6 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.EnableBPFClockProbe = vp.GetBool(EnableBPFClockProbe) c.EnableIPMasqAgent = vp.GetBool(EnableIPMasqAgent) c.EnableIPv4EgressGateway = vp.GetBool(EnableIPv4EgressGateway) - c.EgressGatewayPolicyMapEntries = vp.GetInt(EgressGatewayPolicyMapEntriesName) c.EnableEnvoyConfig = vp.GetBool(EnableEnvoyConfig) c.EnableIngressController = vp.GetBool(EnableIngressController) c.EnableGatewayAPI = vp.GetBool(EnableGatewayAPI) @@ -2992,33 +3063,29 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { 
c.IPTablesLockTimeout = vp.GetDuration(IPTablesLockTimeout) c.IPTablesRandomFully = vp.GetBool(IPTablesRandomFully) c.IPSecKeyFile = vp.GetString(IPSecKeyFileName) - c.EnableMonitor = vp.GetBool(EnableMonitorName) + c.IPsecKeyRotationDuration = vp.GetDuration(IPsecKeyRotationDuration) + c.EnableIPsecKeyWatcher = vp.GetBool(EnableIPsecKeyWatcher) c.MonitorAggregation = vp.GetString(MonitorAggregationName) c.MonitorAggregationInterval = vp.GetDuration(MonitorAggregationInterval) - c.MonitorQueueSize = vp.GetInt(MonitorQueueSizeName) c.MTU = vp.GetInt(MTUName) c.PreAllocateMaps = vp.GetBool(PreAllocateMapsName) c.PrependIptablesChains = vp.GetBool(PrependIptablesChainsName) c.ProcFs = vp.GetString(ProcFs) - c.PrometheusServeAddr = vp.GetString(PrometheusServeAddr) c.ProxyConnectTimeout = vp.GetInt(ProxyConnectTimeout) c.ProxyGID = vp.GetInt(ProxyGID) c.ProxyPrometheusPort = vp.GetInt(ProxyPrometheusPort) c.ProxyMaxRequestsPerConnection = vp.GetInt(ProxyMaxRequestsPerConnection) c.ProxyMaxConnectionDuration = time.Duration(vp.GetInt64(ProxyMaxConnectionDuration)) - c.ReadCNIConfiguration = vp.GetString(ReadCNIConfiguration) + c.ProxyIdleTimeout = time.Duration(vp.GetInt64(ProxyIdleTimeout)) c.RestoreState = vp.GetBool(Restore) c.RouteMetric = vp.GetInt(RouteMetric) c.RunDir = vp.GetString(StateDir) + c.ExternalEnvoyProxy = vp.GetBool(ExternalEnvoyProxy) c.SidecarIstioProxyImage = vp.GetString(SidecarIstioProxyImage) c.UseSingleClusterRoute = vp.GetBool(SingleClusterRouteName) c.SocketPath = vp.GetString(SocketPath) - c.SockopsEnable = vp.GetBool(SockopsEnableName) c.TracePayloadlen = vp.GetInt(TracePayloadlen) c.Version = vp.GetString(Version) - c.WriteCNIConfigurationWhenReady = vp.GetString(WriteCNIConfigurationWhenReady) - c.CNIExclusive = vp.GetBool(CNIExclusive) - c.CNILogFile = vp.GetString(CNILogFile) c.PolicyTriggerInterval = vp.GetDuration(PolicyTriggerInterval) c.CTMapEntriesTimeoutTCP = vp.GetDuration(CTMapEntriesTimeoutTCPName) c.CTMapEntriesTimeoutAny = vp.GetDuration(CTMapEntriesTimeoutAnyName) @@ -3043,6 +3110,7 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.BGPConfigPath = vp.GetString(BGPConfigPath) c.ExternalClusterIP = vp.GetBool(ExternalClusterIPName) c.EnableNat46X64Gateway = vp.GetBool(EnableNat46X64Gateway) + c.EnableHighScaleIPcache = vp.GetBool(EnableHighScaleIPcache) c.EnableIPv4Masquerade = vp.GetBool(EnableIPv4Masquerade) && c.EnableIPv4 c.EnableIPv6Masquerade = vp.GetBool(EnableIPv6Masquerade) && c.EnableIPv6 c.EnableBPFMasquerade = vp.GetBool(EnableBPFMasquerade) @@ -3073,17 +3141,35 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.TCFilterPriority = uint16(tcFilterPrio) c.Tunnel = vp.GetString(TunnelName) + c.RoutingMode = vp.GetString(RoutingMode) + c.TunnelProtocol = vp.GetString(TunnelProtocol) c.TunnelPort = vp.GetInt(TunnelPortName) + if c.Tunnel != "" && c.RoutingMode != defaults.RoutingMode { + log.Fatalf("Option --%s cannot be used in combination with --%s", RoutingMode, TunnelName) + } + + if c.Tunnel == "disabled" { + c.RoutingMode = RoutingModeNative + } else if c.Tunnel != "" { + c.TunnelProtocol = c.Tunnel + } + c.Tunnel = "" + if c.TunnelPort == 0 { - switch c.Tunnel { - case TunnelDisabled: - // tunnel might still be used by eg. 
EgressGW - c.TunnelPort = defaults.TunnelPortVXLAN - case TunnelVXLAN: - c.TunnelPort = defaults.TunnelPortVXLAN - case TunnelGeneve: + // manually pick port for native-routing and DSR with Geneve dispatch: + if !c.TunnelingEnabled() && + (c.EnableNodePort || c.KubeProxyReplacement == KubeProxyReplacementStrict) && + c.NodePortMode != NodePortModeSNAT && + c.LoadBalancerDSRDispatch == DSRDispatchGeneve { c.TunnelPort = defaults.TunnelPortGeneve + } else { + switch c.TunnelProtocol { + case TunnelVXLAN: + c.TunnelPort = defaults.TunnelPortVXLAN + case TunnelGeneve: + c.TunnelPort = defaults.TunnelPortGeneve + } } } @@ -3258,40 +3344,24 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.EndpointStatus[option] = struct{}{} } - if c.MonitorQueueSize == 0 { - c.MonitorQueueSize = getDefaultMonitorQueueSize(runtime.NumCPU()) - } - - // Metrics Setup - metrics.ResetMetrics() - defaultMetrics := metrics.DefaultMetrics() - flagMetrics := append(vp.GetStringSlice(Metrics), c.additionalMetrics()...) - for _, metric := range flagMetrics { - switch metric[0] { - case '+': - defaultMetrics[metric[1:]] = struct{}{} - case '-': - delete(defaultMetrics, metric[1:]) - } - } - var collectors []prometheus.Collector - metricsSlice := common.MapStringStructToSlice(defaultMetrics) - c.MetricsConfig, collectors = metrics.CreateConfiguration(metricsSlice) - metrics.MustRegister(collectors...) - if err := c.parseExcludedLocalAddresses(vp.GetStringSlice(ExcludeLocalAddress)); err != nil { log.WithError(err).Fatalf("Unable to parse excluded local addresses") } + // Ensure CiliumEndpointSlice is enabled only if CiliumEndpointCRD is enabled too. + c.EnableCiliumEndpointSlice = vp.GetBool(EnableCiliumEndpointSlice) + if c.EnableCiliumEndpointSlice && c.DisableCiliumEndpointCRD { + log.Fatalf("Running Cilium with %s=%t requires %s set to false to enable CiliumEndpoint CRDs.", + EnableCiliumEndpointSlice, c.EnableCiliumEndpointSlice, DisableCiliumEndpointCRDName) + } + c.IdentityAllocationMode = vp.GetString(IdentityAllocationMode) switch c.IdentityAllocationMode { // This is here for tests. Some call Populate without the normal init case "": c.IdentityAllocationMode = IdentityAllocationModeKVstore - case IdentityAllocationModeKVstore, IdentityAllocationModeCRD: // c.IdentityAllocationMode is set above - default: log.Fatalf("Invalid identity allocation mode %q. 
It must be one of %s or %s", c.IdentityAllocationMode, IdentityAllocationModeKVstore, IdentityAllocationModeCRD) } @@ -3320,6 +3390,11 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.K8sRequireIPv6PodCIDR = true } } + if m, err := command.GetStringMapStringE(vp, IPAMMultiPoolPreAllocation); err != nil { + log.Fatalf("unable to parse %s: %s", IPAMMultiPoolPreAllocation, err) + } else { + c.IPAMMultiPoolPreAllocation = m + } c.KubeProxyReplacementHealthzBindAddr = vp.GetString(KubeProxyReplacementHealthzBindAddr) @@ -3348,9 +3423,9 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.HubbleRecorderStoragePath = vp.GetString(HubbleRecorderStoragePath) c.HubbleRecorderSinkQueueSize = vp.GetInt(HubbleRecorderSinkQueueSize) c.HubbleSkipUnknownCGroupIDs = vp.GetBool(HubbleSkipUnknownCGroupIDs) + c.HubbleMonitorEvents = vp.GetStringSlice(HubbleMonitorEvents) c.DisableIptablesFeederRules = vp.GetStringSlice(DisableIptablesFeederRules) - c.EnableCiliumEndpointSlice = vp.GetBool(EnableCiliumEndpointSlice) // Hidden options c.CompilerFlags = vp.GetStringSlice(CompilerFlags) @@ -3363,6 +3438,7 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.EndpointQueueSize = sanitizeIntParam(vp, EndpointQueueSize, defaults.EndpointQueueSize) c.DisableCNPStatusUpdates = vp.GetBool(DisableCNPStatusUpdates) c.EnableICMPRules = vp.GetBool(EnableICMPRules) + c.UseCiliumInternalIPForIPsec = vp.GetBool(UseCiliumInternalIPForIPsec) c.BypassIPAvailabilityUponRestore = vp.GetBool(BypassIPAvailabilityUponRestore) c.EnableK8sTerminatingEndpoint = vp.GetBool(EnableK8sTerminatingEndpoint) c.EnableStaleCiliumEndpointCleanup = vp.GetBool(EnableStaleCiliumEndpointCleanup) @@ -3394,17 +3470,6 @@ func (c *DaemonConfig) Populate(vp *viper.Viper) { c.EnableK8sNetworkPolicy = vp.GetBool(EnableK8sNetworkPolicy) } -func (c *DaemonConfig) additionalMetrics() []string { - addMetric := func(name string) string { - return "+" + metrics.Namespace + "_" + name - } - var m []string - if c.DNSProxyConcurrencyLimit > 0 { - m = append(m, addMetric(metrics.SubsystemFQDN+"_semaphore_rejected_total")) - } - return m -} - func (c *DaemonConfig) populateDevices(vp *viper.Viper) { c.devices = vp.GetStringSlice(Devices) @@ -3573,12 +3638,12 @@ func (c *DaemonConfig) checkMapSizeLimits() error { } func (c *DaemonConfig) checkIPv4NativeRoutingCIDR() error { - if c.GetIPv4NativeRoutingCIDR() == nil && c.EnableIPv4Masquerade && c.Tunnel == TunnelDisabled && + if c.GetIPv4NativeRoutingCIDR() == nil && c.EnableIPv4Masquerade && !c.TunnelingEnabled() && c.IPAMMode() != ipamOption.IPAMENI && c.EnableIPv4 && c.IPAMMode() != ipamOption.IPAMAlibabaCloud { return fmt.Errorf( "native routing cidr must be configured with option --%s "+ "in combination with --%s --%s=%s --%s=%s --%s=true", - IPv4NativeRoutingCIDR, EnableIPv4Masquerade, TunnelName, c.Tunnel, + IPv4NativeRoutingCIDR, EnableIPv4Masquerade, RoutingMode, RoutingModeNative, IPAM, c.IPAMMode(), EnableIPv4Name) } @@ -3586,12 +3651,12 @@ func (c *DaemonConfig) checkIPv4NativeRoutingCIDR() error { } func (c *DaemonConfig) checkIPv6NativeRoutingCIDR() error { - if c.GetIPv6NativeRoutingCIDR() == nil && c.EnableIPv6Masquerade && c.Tunnel == TunnelDisabled && + if c.GetIPv6NativeRoutingCIDR() == nil && c.EnableIPv6Masquerade && !c.TunnelingEnabled() && c.EnableIPv6 { return fmt.Errorf( "native routing cidr must be configured with option --%s "+ "in combination with --%s --%s=%s --%s=true", - IPv6NativeRoutingCIDR, EnableIPv6Masquerade, TunnelName, c.Tunnel, + IPv6NativeRoutingCIDR, 
EnableIPv6Masquerade, RoutingMode, RoutingModeNative, EnableIPv6Name) } @@ -4062,7 +4127,7 @@ func EndpointStatusValuesMap() (values map[string]struct{}) { // place. func MightAutoDetectDevices() bool { devices := Config.GetDevices() - return ((Config.EnableHostFirewall || Config.EnableWireguard) && len(devices) == 0) || + return ((Config.EnableHostFirewall || Config.EnableWireguard || Config.EnableHighScaleIPcache) && len(devices) == 0) || (Config.KubeProxyReplacement != KubeProxyReplacementDisabled && (len(devices) == 0 || Config.DirectRoutingDevice == "")) } diff --git a/vendor/github.com/cilium/cilium/pkg/safeio/safeio.go b/vendor/github.com/cilium/cilium/pkg/safeio/safeio.go deleted file mode 100644 index 4eddccc41..000000000 --- a/vendor/github.com/cilium/cilium/pkg/safeio/safeio.go +++ /dev/null @@ -1,77 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package safeio - -import ( - "fmt" - "io" -) - -// ErrLimitReached indicates that ReadAllLimit has -// reached its limit before completing a full read -// of the io.Reader. -var ErrLimitReached = fmt.Errorf("read limit reached") - -// ByteSize expresses the size of bytes -type ByteSize float64 - -const ( - _ = iota // ignore first value by assigning to blank identifier - // KB is a Kilobyte - KB ByteSize = 1 << (10 * iota) - // MB is a Megabyte - MB - // GB is a Gigabyte - GB - // TB is a Terabyte - TB - // PB is a Petabyte - PB - // EB is an Exabyte - EB - // ZB is a Zettabyte - ZB - // YB is a Yottabyte - YB -) - -// String converts a ByteSize to a string -func (b ByteSize) String() string { - switch { - case b >= YB: - return fmt.Sprintf("%.1fYB", b/YB) - case b >= ZB: - return fmt.Sprintf("%.1fZB", b/ZB) - case b >= EB: - return fmt.Sprintf("%.1fEB", b/EB) - case b >= PB: - return fmt.Sprintf("%.1fPB", b/PB) - case b >= TB: - return fmt.Sprintf("%.1fTB", b/TB) - case b >= GB: - return fmt.Sprintf("%.1fGB", b/GB) - case b >= MB: - return fmt.Sprintf("%.1fMB", b/MB) - case b >= KB: - return fmt.Sprintf("%.1fKB", b/KB) - } - return fmt.Sprintf("%.1fB", b) -} - -// ReadAllLimit reads from r until an error, EOF, or after n bytes and returns -// the data it read. A successful call returns err == nil, not err == EOF. -// Because ReadAllLimit is defined to read from src until EOF it does not -// treat an EOF from Read as an error to be reported. If the limit is reached -// ReadAllLimit will return ErrLimitReached as an error. -func ReadAllLimit(r io.Reader, n ByteSize) ([]byte, error) { - limit := int(n + 1) - buf, err := io.ReadAll(io.LimitReader(r, int64(limit))) - if err != nil { - return buf, err - } - if len(buf) >= limit { - return buf[:limit-1], ErrLimitReached - } - return buf, nil -} diff --git a/vendor/github.com/cilium/cilium/pkg/safetime/doc.go b/vendor/github.com/cilium/cilium/pkg/safetime/doc.go deleted file mode 100644 index 11ecf3c5f..000000000 --- a/vendor/github.com/cilium/cilium/pkg/safetime/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -// Package safetime contains a wrapper function for time.Since to deal with -// negative durations. 
-package safetime diff --git a/vendor/github.com/cilium/cilium/pkg/safetime/safetime.go b/vendor/github.com/cilium/cilium/pkg/safetime/safetime.go deleted file mode 100644 index 99240e626..000000000 --- a/vendor/github.com/cilium/cilium/pkg/safetime/safetime.go +++ /dev/null @@ -1,42 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package safetime - -import ( - "runtime" - "time" - - "github.com/sirupsen/logrus" - - "github.com/cilium/cilium/pkg/logging/logfields" -) - -// TimeSinceSafe returns the duration since t. If the duration is negative, -// returns false to indicate the fact. -// -// Used to workaround a malfunctioning monotonic clock. -func TimeSinceSafe(t time.Time, logger *logrus.Entry) (time.Duration, bool) { - n := time.Now() - d := n.Sub(t) - - if d < 0 { - logger = logger.WithFields(logrus.Fields{ - logfields.StartTime: t, - logfields.EndTime: n, - logfields.Duration: d, - }) - _, file, line, ok := runtime.Caller(1) - if ok { - logger = logger.WithFields(logrus.Fields{ - logfields.Path: file, - logfields.Line: line, - }) - } - logger.Warn("BUG: negative duration") - - return time.Duration(0), false - } - - return d, true -} diff --git a/vendor/github.com/cilium/cilium/pkg/slices/doc.go b/vendor/github.com/cilium/cilium/pkg/slices/doc.go new file mode 100644 index 000000000..b5bd209a8 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/slices/doc.go @@ -0,0 +1,5 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +// Package slices contains common utilities to work on slices of any type. +package slices diff --git a/vendor/github.com/cilium/cilium/pkg/slices/slices.go b/vendor/github.com/cilium/cilium/pkg/slices/slices.go new file mode 100644 index 000000000..243a00819 --- /dev/null +++ b/vendor/github.com/cilium/cilium/pkg/slices/slices.go @@ -0,0 +1,147 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright Authors of Cilium + +package slices + +import ( + "sort" + + "golang.org/x/exp/constraints" + "golang.org/x/exp/slices" +) + +// Unique deduplicates the elements in the input slice, preserving their ordering and +// modifying the slice in place. +// Unique relies on a map to find multiple occurrences of the same elements. +// For slices with a size less than 192 elements, a simpler O(N^2) search algorithm +// that does not allocate memory is used instead. +// Limit of 192 has been experimentally derived (look at BenchmarkUnique for more information). +func Unique[S ~[]T, T comparable](s S) S { + if len(s) < 2 { + return s + } + + last := 0 + + if len(s) < 192 { + Loop: + for i := 0; i < len(s); i++ { + for j := 0; j < last; j++ { + if s[i] == s[j] { + continue Loop + } + } + s[last] = s[i] + last++ + } + } else { + set := make(map[T]struct{}, len(s)) + for i := 0; i < len(s); i++ { + if _, ok := set[s[i]]; ok { + continue + } + set[s[i]] = struct{}{} + s[last] = s[i] + last++ + } + } + + return s[:last] +} + +// UniqueFunc deduplicates the elements in the input slice like Unique, but takes a +// function to extract the comparable "key" to compare T. This is slower than Unique, +// but can be used with non-comparable elements. +func UniqueFunc[S ~[]T, T any, K comparable](s S, key func(i int) K) S { + if len(s) < 2 { + return s + } + + last := 0 + + set := make(map[K]struct{}, len(s)) + for i := 0; i < len(s); i++ { + if _, ok := set[key(i)]; ok { + continue + } + set[key(i)] = struct{}{} + s[last] = s[i] + last++ + } + + return s[:last] +} + +// SortedUnique sorts and dedup the input slice in place. 
+// It uses the < operator to compare the elements in the slice and thus requires +// the elements to satisfies contraints.Ordered. +func SortedUnique[S ~[]T, T constraints.Ordered](s S) S { + if len(s) < 2 { + return s + } + + sort.Slice(s, func(i, j int) bool { + return s[i] < s[j] + }) + return slices.Compact(s) +} + +// SortedUniqueFunc is like SortedUnique but allows the user to specify custom functions +// for ordering (less function) and comparing (eq function) the elements in the slice. +// This is useful in all the cases where SortedUnique cannot be used: +// - for types that do not satisfy constraints.Ordered (e.g: composite types) +// - when the user wants to customize how elements are compared (e.g: user wants to enforce reverse ordering) +func SortedUniqueFunc[S ~[]T, T any]( + s S, + less func(i, j int) bool, + eq func(a, b T) bool, +) S { + if len(s) < 2 { + return s + } + + sort.Slice(s, less) + return slices.CompactFunc(s, eq) +} + +// Diff returns a slice of elements which is the difference of a and b. +// The returned slice keeps the elements in the same order found in the "a" slice. +// Both input slices are considered as sets, that is, all elements are considered as +// unique when computing the difference. +func Diff[S ~[]T, T comparable](a, b S) []T { + if len(a) == 0 { + return nil + } + if len(b) == 0 { + return a + } + + var diff []T + + setB := make(map[T]struct{}, len(b)) + for _, v := range b { + setB[v] = struct{}{} + } + + setA := make(map[T]struct{}, len(a)) + for _, v := range a { + // v is in b, too + if _, ok := setB[v]; ok { + continue + } + // v has been already added to diff + if _, ok := setA[v]; ok { + continue + } + diff = append(diff, v) + setA[v] = struct{}{} + } + return diff +} + +// SubsetOf returns a boolean that indicates if slice a is a subset of slice b. +// In case it is not, the returned slice contains all the unique elements that are in a but not in b. +func SubsetOf[S ~[]T, T comparable](a, b S) (bool, []T) { + d := Diff(a, b) + return len(d) == 0, d +} diff --git a/vendor/github.com/cilium/cilium/pkg/spanstat/doc.go b/vendor/github.com/cilium/cilium/pkg/spanstat/doc.go deleted file mode 100644 index 61b9dc812..000000000 --- a/vendor/github.com/cilium/cilium/pkg/spanstat/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -// Package spanstat provides a mechanism to measure duration of multiple spans -// and add them up to a total duration -package spanstat diff --git a/vendor/github.com/cilium/cilium/pkg/spanstat/spanstat.go b/vendor/github.com/cilium/cilium/pkg/spanstat/spanstat.go deleted file mode 100644 index e3f7dbb6c..000000000 --- a/vendor/github.com/cilium/cilium/pkg/spanstat/spanstat.go +++ /dev/null @@ -1,114 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Authors of Cilium - -package spanstat - -import ( - "time" - - "github.com/cilium/cilium/pkg/lock" - "github.com/cilium/cilium/pkg/logging" - "github.com/cilium/cilium/pkg/logging/logfields" - "github.com/cilium/cilium/pkg/safetime" -) - -var ( - subSystem = "spanstat" - log = logging.DefaultLogger.WithField(logfields.LogSubsys, subSystem) -) - -// SpanStat measures the total duration of all time spent in between Start() -// and Stop() calls. 
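The new pkg/slices package introduced above provides generic, allocation-conscious deduplication and set-difference helpers. A minimal usage sketch follows; it is illustrative only and not part of this diff, assumes only the vendored import path added above, and uses made-up example values.

package main

import (
	"fmt"

	"github.com/cilium/cilium/pkg/slices"
)

func main() {
	namespaces := []string{"default", "kube-system", "default", "cilium"}

	// Unique dedups in place and preserves first-seen order.
	namespaces = slices.Unique(namespaces)
	fmt.Println(namespaces) // [default kube-system cilium]

	// SubsetOf reports whether every element of the first slice appears in the
	// second, returning the unique missing elements otherwise.
	ok, missing := slices.SubsetOf([]string{"default", "monitoring"}, namespaces)
	fmt.Println(ok, missing) // false [monitoring]
}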
-type SpanStat struct { - mutex lock.RWMutex - spanStart time.Time - successDuration time.Duration - failureDuration time.Duration -} - -// Start creates a new SpanStat and starts it -func Start() *SpanStat { - s := &SpanStat{} - return s.Start() -} - -// Start starts a new span -func (s *SpanStat) Start() *SpanStat { - s.mutex.Lock() - defer s.mutex.Unlock() - s.spanStart = time.Now() - return s -} - -// EndError calls End() based on the value of err -func (s *SpanStat) EndError(err error) *SpanStat { - s.mutex.Lock() - defer s.mutex.Unlock() - return s.end(err == nil) -} - -// End ends the current span and adds the measured duration to the total -// cumulated duration, and to the success or failure cumulated duration -// depending on the given success flag -func (s *SpanStat) End(success bool) *SpanStat { - s.mutex.Lock() - defer s.mutex.Unlock() - return s.end(success) -} - -// must be called with Lock() held -func (s *SpanStat) end(success bool) *SpanStat { - if !s.spanStart.IsZero() { - d, _ := safetime.TimeSinceSafe(s.spanStart, log) - if success { - s.successDuration += d - } else { - s.failureDuration += d - } - } - s.spanStart = time.Time{} - return s -} - -// Total returns the total duration of all spans measured, including both -// successes and failures -func (s *SpanStat) Total() time.Duration { - s.mutex.RLock() - defer s.mutex.RUnlock() - return s.successDuration + s.failureDuration -} - -// SuccessTotal returns the total duration of all successful spans measured -func (s *SpanStat) SuccessTotal() time.Duration { - s.mutex.RLock() - defer s.mutex.RUnlock() - return s.successDuration -} - -// FailureTotal returns the total duration of all unsuccessful spans measured -func (s *SpanStat) FailureTotal() time.Duration { - s.mutex.RLock() - defer s.mutex.RUnlock() - return s.failureDuration -} - -// Reset rests the duration measurements -func (s *SpanStat) Reset() { - s.mutex.Lock() - defer s.mutex.Unlock() - s.successDuration = 0 - s.failureDuration = 0 -} - -// Seconds returns the number of seconds represents by the spanstat. If a span -// is still open, it is closed first. -func (s *SpanStat) Seconds() float64 { - s.mutex.Lock() - defer s.mutex.Unlock() - if !s.spanStart.IsZero() { - s.end(true) - } - - total := s.successDuration + s.failureDuration - return total.Seconds() -} diff --git a/vendor/github.com/go-logr/logr/.golangci.yaml b/vendor/github.com/go-logr/logr/.golangci.yaml index 94ff801df..0cffafa7b 100644 --- a/vendor/github.com/go-logr/logr/.golangci.yaml +++ b/vendor/github.com/go-logr/logr/.golangci.yaml @@ -6,7 +6,6 @@ linters: disable-all: true enable: - asciicheck - - deadcode - errcheck - forcetypeassert - gocritic @@ -18,10 +17,8 @@ linters: - misspell - revive - staticcheck - - structcheck - typecheck - unused - - varcheck issues: exclude-use-default: false diff --git a/vendor/github.com/go-logr/logr/discard.go b/vendor/github.com/go-logr/logr/discard.go index 9d92a38f1..99fe8be93 100644 --- a/vendor/github.com/go-logr/logr/discard.go +++ b/vendor/github.com/go-logr/logr/discard.go @@ -20,35 +20,5 @@ package logr // used whenever the caller is not interested in the logs. Logger instances // produced by this function always compare as equal. func Discard() Logger { - return Logger{ - level: 0, - sink: discardLogSink{}, - } -} - -// discardLogSink is a LogSink that discards all messages. 
-type discardLogSink struct{} - -// Verify that it actually implements the interface -var _ LogSink = discardLogSink{} - -func (l discardLogSink) Init(RuntimeInfo) { -} - -func (l discardLogSink) Enabled(int) bool { - return false -} - -func (l discardLogSink) Info(int, string, ...interface{}) { -} - -func (l discardLogSink) Error(error, string, ...interface{}) { -} - -func (l discardLogSink) WithValues(...interface{}) LogSink { - return l -} - -func (l discardLogSink) WithName(string) LogSink { - return l + return New(nil) } diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go deleted file mode 100644 index 7accdb0c4..000000000 --- a/vendor/github.com/go-logr/logr/funcr/funcr.go +++ /dev/null @@ -1,787 +0,0 @@ -/* -Copyright 2021 The logr Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package funcr implements formatting of structured log messages and -// optionally captures the call site and timestamp. -// -// The simplest way to use it is via its implementation of a -// github.com/go-logr/logr.LogSink with output through an arbitrary -// "write" function. See New and NewJSON for details. -// -// Custom LogSinks -// -// For users who need more control, a funcr.Formatter can be embedded inside -// your own custom LogSink implementation. This is useful when the LogSink -// needs to implement additional methods, for example. -// -// Formatting -// -// This will respect logr.Marshaler, fmt.Stringer, and error interfaces for -// values which are being logged. When rendering a struct, funcr will use Go's -// standard JSON tags (all except "string"). -package funcr - -import ( - "bytes" - "encoding" - "fmt" - "path/filepath" - "reflect" - "runtime" - "strconv" - "strings" - "time" - - "github.com/go-logr/logr" -) - -// New returns a logr.Logger which is implemented by an arbitrary function. -func New(fn func(prefix, args string), opts Options) logr.Logger { - return logr.New(newSink(fn, NewFormatter(opts))) -} - -// NewJSON returns a logr.Logger which is implemented by an arbitrary function -// and produces JSON output. -func NewJSON(fn func(obj string), opts Options) logr.Logger { - fnWrapper := func(_, obj string) { - fn(obj) - } - return logr.New(newSink(fnWrapper, NewFormatterJSON(opts))) -} - -// Underlier exposes access to the underlying logging function. Since -// callers only have a logr.Logger, they have to know which -// implementation is in use, so this interface is less of an -// abstraction and more of a way to test type conversion. -type Underlier interface { - GetUnderlying() func(prefix, args string) -} - -func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink { - l := &fnlogger{ - Formatter: formatter, - write: fn, - } - // For skipping fnlogger.Info and fnlogger.Error. - l.Formatter.AddCallDepth(1) - return l -} - -// Options carries parameters which influence the way logs are generated. -type Options struct { - // LogCaller tells funcr to add a "caller" key to some or all log lines. 
- // This has some overhead, so some users might not want it. - LogCaller MessageClass - - // LogCallerFunc tells funcr to also log the calling function name. This - // has no effect if caller logging is not enabled (see Options.LogCaller). - LogCallerFunc bool - - // LogTimestamp tells funcr to add a "ts" key to log lines. This has some - // overhead, so some users might not want it. - LogTimestamp bool - - // TimestampFormat tells funcr how to render timestamps when LogTimestamp - // is enabled. If not specified, a default format will be used. For more - // details, see docs for Go's time.Layout. - TimestampFormat string - - // Verbosity tells funcr which V logs to produce. Higher values enable - // more logs. Info logs at or below this level will be written, while logs - // above this level will be discarded. - Verbosity int - - // RenderBuiltinsHook allows users to mutate the list of key-value pairs - // while a log line is being rendered. The kvList argument follows logr - // conventions - each pair of slice elements is comprised of a string key - // and an arbitrary value (verified and sanitized before calling this - // hook). The value returned must follow the same conventions. This hook - // can be used to audit or modify logged data. For example, you might want - // to prefix all of funcr's built-in keys with some string. This hook is - // only called for built-in (provided by funcr itself) key-value pairs. - // Equivalent hooks are offered for key-value pairs saved via - // logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and - // for user-provided pairs (see RenderArgsHook). - RenderBuiltinsHook func(kvList []interface{}) []interface{} - - // RenderValuesHook is the same as RenderBuiltinsHook, except that it is - // only called for key-value pairs saved via logr.Logger.WithValues. See - // RenderBuiltinsHook for more details. - RenderValuesHook func(kvList []interface{}) []interface{} - - // RenderArgsHook is the same as RenderBuiltinsHook, except that it is only - // called for key-value pairs passed directly to Info and Error. See - // RenderBuiltinsHook for more details. - RenderArgsHook func(kvList []interface{}) []interface{} - - // MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct - // that contains a struct, etc.) it may log. Every time it finds a struct, - // slice, array, or map the depth is increased by one. When the maximum is - // reached, the value will be converted to a string indicating that the max - // depth has been exceeded. If this field is not specified, a default - // value will be used. - MaxLogDepth int -} - -// MessageClass indicates which category or categories of messages to consider. -type MessageClass int - -const ( - // None ignores all message classes. - None MessageClass = iota - // All considers all message classes. - All - // Info only considers info messages. - Info - // Error only considers error messages. - Error -) - -// fnlogger inherits some of its LogSink implementation from Formatter -// and just needs to add some glue code. 
-type fnlogger struct { - Formatter - write func(prefix, args string) -} - -func (l fnlogger) WithName(name string) logr.LogSink { - l.Formatter.AddName(name) - return &l -} - -func (l fnlogger) WithValues(kvList ...interface{}) logr.LogSink { - l.Formatter.AddValues(kvList) - return &l -} - -func (l fnlogger) WithCallDepth(depth int) logr.LogSink { - l.Formatter.AddCallDepth(depth) - return &l -} - -func (l fnlogger) Info(level int, msg string, kvList ...interface{}) { - prefix, args := l.FormatInfo(level, msg, kvList) - l.write(prefix, args) -} - -func (l fnlogger) Error(err error, msg string, kvList ...interface{}) { - prefix, args := l.FormatError(err, msg, kvList) - l.write(prefix, args) -} - -func (l fnlogger) GetUnderlying() func(prefix, args string) { - return l.write -} - -// Assert conformance to the interfaces. -var _ logr.LogSink = &fnlogger{} -var _ logr.CallDepthLogSink = &fnlogger{} -var _ Underlier = &fnlogger{} - -// NewFormatter constructs a Formatter which emits a JSON-like key=value format. -func NewFormatter(opts Options) Formatter { - return newFormatter(opts, outputKeyValue) -} - -// NewFormatterJSON constructs a Formatter which emits strict JSON. -func NewFormatterJSON(opts Options) Formatter { - return newFormatter(opts, outputJSON) -} - -// Defaults for Options. -const defaultTimestampFormat = "2006-01-02 15:04:05.000000" -const defaultMaxLogDepth = 16 - -func newFormatter(opts Options, outfmt outputFormat) Formatter { - if opts.TimestampFormat == "" { - opts.TimestampFormat = defaultTimestampFormat - } - if opts.MaxLogDepth == 0 { - opts.MaxLogDepth = defaultMaxLogDepth - } - f := Formatter{ - outputFormat: outfmt, - prefix: "", - values: nil, - depth: 0, - opts: opts, - } - return f -} - -// Formatter is an opaque struct which can be embedded in a LogSink -// implementation. It should be constructed with NewFormatter. Some of -// its methods directly implement logr.LogSink. -type Formatter struct { - outputFormat outputFormat - prefix string - values []interface{} - valuesStr string - depth int - opts Options -} - -// outputFormat indicates which outputFormat to use. -type outputFormat int - -const ( - // outputKeyValue emits a JSON-like key=value format, but not strict JSON. - outputKeyValue outputFormat = iota - // outputJSON emits strict JSON. - outputJSON -) - -// PseudoStruct is a list of key-value pairs that gets logged as a struct. -type PseudoStruct []interface{} - -// render produces a log line, ready to use. -func (f Formatter) render(builtins, args []interface{}) string { - // Empirically bytes.Buffer is faster than strings.Builder for this. - buf := bytes.NewBuffer(make([]byte, 0, 1024)) - if f.outputFormat == outputJSON { - buf.WriteByte('{') - } - vals := builtins - if hook := f.opts.RenderBuiltinsHook; hook != nil { - vals = hook(f.sanitize(vals)) - } - f.flatten(buf, vals, false, false) // keys are ours, no need to escape - continuing := len(builtins) > 0 - if len(f.valuesStr) > 0 { - if continuing { - if f.outputFormat == outputJSON { - buf.WriteByte(',') - } else { - buf.WriteByte(' ') - } - } - continuing = true - buf.WriteString(f.valuesStr) - } - vals = args - if hook := f.opts.RenderArgsHook; hook != nil { - vals = hook(f.sanitize(vals)) - } - f.flatten(buf, vals, continuing, true) // escape user-provided keys - if f.outputFormat == outputJSON { - buf.WriteByte('}') - } - return buf.String() -} - -// flatten renders a list of key-value pairs into a buffer. 
If continuing is -// true, it assumes that the buffer has previous values and will emit a -// separator (which depends on the output format) before the first pair it -// writes. If escapeKeys is true, the keys are assumed to have -// non-JSON-compatible characters in them and must be evaluated for escapes. -// -// This function returns a potentially modified version of kvList, which -// ensures that there is a value for every key (adding a value if needed) and -// that each key is a string (substituting a key if needed). -func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing bool, escapeKeys bool) []interface{} { - // This logic overlaps with sanitize() but saves one type-cast per key, - // which can be measurable. - if len(kvList)%2 != 0 { - kvList = append(kvList, noValue) - } - for i := 0; i < len(kvList); i += 2 { - k, ok := kvList[i].(string) - if !ok { - k = f.nonStringKey(kvList[i]) - kvList[i] = k - } - v := kvList[i+1] - - if i > 0 || continuing { - if f.outputFormat == outputJSON { - buf.WriteByte(',') - } else { - // In theory the format could be something we don't understand. In - // practice, we control it, so it won't be. - buf.WriteByte(' ') - } - } - - if escapeKeys { - buf.WriteString(prettyString(k)) - } else { - // this is faster - buf.WriteByte('"') - buf.WriteString(k) - buf.WriteByte('"') - } - if f.outputFormat == outputJSON { - buf.WriteByte(':') - } else { - buf.WriteByte('=') - } - buf.WriteString(f.pretty(v)) - } - return kvList -} - -func (f Formatter) pretty(value interface{}) string { - return f.prettyWithFlags(value, 0, 0) -} - -const ( - flagRawStruct = 0x1 // do not print braces on structs -) - -// TODO: This is not fast. Most of the overhead goes here. -func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) string { - if depth > f.opts.MaxLogDepth { - return `""` - } - - // Handle types that take full control of logging. - if v, ok := value.(logr.Marshaler); ok { - // Replace the value with what the type wants to get logged. - // That then gets handled below via reflection. - value = invokeMarshaler(v) - } - - // Handle types that want to format themselves. - switch v := value.(type) { - case fmt.Stringer: - value = invokeStringer(v) - case error: - value = invokeError(v) - } - - // Handling the most common types without reflect is a small perf win. 
- switch v := value.(type) { - case bool: - return strconv.FormatBool(v) - case string: - return prettyString(v) - case int: - return strconv.FormatInt(int64(v), 10) - case int8: - return strconv.FormatInt(int64(v), 10) - case int16: - return strconv.FormatInt(int64(v), 10) - case int32: - return strconv.FormatInt(int64(v), 10) - case int64: - return strconv.FormatInt(int64(v), 10) - case uint: - return strconv.FormatUint(uint64(v), 10) - case uint8: - return strconv.FormatUint(uint64(v), 10) - case uint16: - return strconv.FormatUint(uint64(v), 10) - case uint32: - return strconv.FormatUint(uint64(v), 10) - case uint64: - return strconv.FormatUint(v, 10) - case uintptr: - return strconv.FormatUint(uint64(v), 10) - case float32: - return strconv.FormatFloat(float64(v), 'f', -1, 32) - case float64: - return strconv.FormatFloat(v, 'f', -1, 64) - case complex64: - return `"` + strconv.FormatComplex(complex128(v), 'f', -1, 64) + `"` - case complex128: - return `"` + strconv.FormatComplex(v, 'f', -1, 128) + `"` - case PseudoStruct: - buf := bytes.NewBuffer(make([]byte, 0, 1024)) - v = f.sanitize(v) - if flags&flagRawStruct == 0 { - buf.WriteByte('{') - } - for i := 0; i < len(v); i += 2 { - if i > 0 { - buf.WriteByte(',') - } - k, _ := v[i].(string) // sanitize() above means no need to check success - // arbitrary keys might need escaping - buf.WriteString(prettyString(k)) - buf.WriteByte(':') - buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1)) - } - if flags&flagRawStruct == 0 { - buf.WriteByte('}') - } - return buf.String() - } - - buf := bytes.NewBuffer(make([]byte, 0, 256)) - t := reflect.TypeOf(value) - if t == nil { - return "null" - } - v := reflect.ValueOf(value) - switch t.Kind() { - case reflect.Bool: - return strconv.FormatBool(v.Bool()) - case reflect.String: - return prettyString(v.String()) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return strconv.FormatInt(int64(v.Int()), 10) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return strconv.FormatUint(uint64(v.Uint()), 10) - case reflect.Float32: - return strconv.FormatFloat(float64(v.Float()), 'f', -1, 32) - case reflect.Float64: - return strconv.FormatFloat(v.Float(), 'f', -1, 64) - case reflect.Complex64: - return `"` + strconv.FormatComplex(complex128(v.Complex()), 'f', -1, 64) + `"` - case reflect.Complex128: - return `"` + strconv.FormatComplex(v.Complex(), 'f', -1, 128) + `"` - case reflect.Struct: - if flags&flagRawStruct == 0 { - buf.WriteByte('{') - } - for i := 0; i < t.NumField(); i++ { - fld := t.Field(i) - if fld.PkgPath != "" { - // reflect says this field is only defined for non-exported fields. - continue - } - if !v.Field(i).CanInterface() { - // reflect isn't clear exactly what this means, but we can't use it. 
- continue - } - name := "" - omitempty := false - if tag, found := fld.Tag.Lookup("json"); found { - if tag == "-" { - continue - } - if comma := strings.Index(tag, ","); comma != -1 { - if n := tag[:comma]; n != "" { - name = n - } - rest := tag[comma:] - if strings.Contains(rest, ",omitempty,") || strings.HasSuffix(rest, ",omitempty") { - omitempty = true - } - } else { - name = tag - } - } - if omitempty && isEmpty(v.Field(i)) { - continue - } - if i > 0 { - buf.WriteByte(',') - } - if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" { - buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1)) - continue - } - if name == "" { - name = fld.Name - } - // field names can't contain characters which need escaping - buf.WriteByte('"') - buf.WriteString(name) - buf.WriteByte('"') - buf.WriteByte(':') - buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1)) - } - if flags&flagRawStruct == 0 { - buf.WriteByte('}') - } - return buf.String() - case reflect.Slice, reflect.Array: - buf.WriteByte('[') - for i := 0; i < v.Len(); i++ { - if i > 0 { - buf.WriteByte(',') - } - e := v.Index(i) - buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1)) - } - buf.WriteByte(']') - return buf.String() - case reflect.Map: - buf.WriteByte('{') - // This does not sort the map keys, for best perf. - it := v.MapRange() - i := 0 - for it.Next() { - if i > 0 { - buf.WriteByte(',') - } - // If a map key supports TextMarshaler, use it. - keystr := "" - if m, ok := it.Key().Interface().(encoding.TextMarshaler); ok { - txt, err := m.MarshalText() - if err != nil { - keystr = fmt.Sprintf("", err.Error()) - } else { - keystr = string(txt) - } - keystr = prettyString(keystr) - } else { - // prettyWithFlags will produce already-escaped values - keystr = f.prettyWithFlags(it.Key().Interface(), 0, depth+1) - if t.Key().Kind() != reflect.String { - // JSON only does string keys. Unlike Go's standard JSON, we'll - // convert just about anything to a string. - keystr = prettyString(keystr) - } - } - buf.WriteString(keystr) - buf.WriteByte(':') - buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1)) - i++ - } - buf.WriteByte('}') - return buf.String() - case reflect.Ptr, reflect.Interface: - if v.IsNil() { - return "null" - } - return f.prettyWithFlags(v.Elem().Interface(), 0, depth) - } - return fmt.Sprintf(`""`, t.Kind().String()) -} - -func prettyString(s string) string { - // Avoid escaping (which does allocations) if we can. - if needsEscape(s) { - return strconv.Quote(s) - } - b := bytes.NewBuffer(make([]byte, 0, 1024)) - b.WriteByte('"') - b.WriteString(s) - b.WriteByte('"') - return b.String() -} - -// needsEscape determines whether the input string needs to be escaped or not, -// without doing any allocations. 
-func needsEscape(s string) bool { - for _, r := range s { - if !strconv.IsPrint(r) || r == '\\' || r == '"' { - return true - } - } - return false -} - -func isEmpty(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Complex64, reflect.Complex128: - return v.Complex() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - } - return false -} - -func invokeMarshaler(m logr.Marshaler) (ret interface{}) { - defer func() { - if r := recover(); r != nil { - ret = fmt.Sprintf("", r) - } - }() - return m.MarshalLog() -} - -func invokeStringer(s fmt.Stringer) (ret string) { - defer func() { - if r := recover(); r != nil { - ret = fmt.Sprintf("", r) - } - }() - return s.String() -} - -func invokeError(e error) (ret string) { - defer func() { - if r := recover(); r != nil { - ret = fmt.Sprintf("", r) - } - }() - return e.Error() -} - -// Caller represents the original call site for a log line, after considering -// logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper. The File and -// Line fields will always be provided, while the Func field is optional. -// Users can set the render hook fields in Options to examine logged key-value -// pairs, one of which will be {"caller", Caller} if the Options.LogCaller -// field is enabled for the given MessageClass. -type Caller struct { - // File is the basename of the file for this call site. - File string `json:"file"` - // Line is the line number in the file for this call site. - Line int `json:"line"` - // Func is the function name for this call site, or empty if - // Options.LogCallerFunc is not enabled. - Func string `json:"function,omitempty"` -} - -func (f Formatter) caller() Caller { - // +1 for this frame, +1 for Info/Error. - pc, file, line, ok := runtime.Caller(f.depth + 2) - if !ok { - return Caller{"", 0, ""} - } - fn := "" - if f.opts.LogCallerFunc { - if fp := runtime.FuncForPC(pc); fp != nil { - fn = fp.Name() - } - } - - return Caller{filepath.Base(file), line, fn} -} - -const noValue = "" - -func (f Formatter) nonStringKey(v interface{}) string { - return fmt.Sprintf("", f.snippet(v)) -} - -// snippet produces a short snippet string of an arbitrary value. -func (f Formatter) snippet(v interface{}) string { - const snipLen = 16 - - snip := f.pretty(v) - if len(snip) > snipLen { - snip = snip[:snipLen] - } - return snip -} - -// sanitize ensures that a list of key-value pairs has a value for every key -// (adding a value if needed) and that each key is a string (substituting a key -// if needed). -func (f Formatter) sanitize(kvList []interface{}) []interface{} { - if len(kvList)%2 != 0 { - kvList = append(kvList, noValue) - } - for i := 0; i < len(kvList); i += 2 { - _, ok := kvList[i].(string) - if !ok { - kvList[i] = f.nonStringKey(kvList[i]) - } - } - return kvList -} - -// Init configures this Formatter from runtime info, such as the call depth -// imposed by logr itself. -// Note that this receiver is a pointer, so depth can be saved. -func (f *Formatter) Init(info logr.RuntimeInfo) { - f.depth += info.CallDepth -} - -// Enabled checks whether an info message at the given level should be logged. 
-func (f Formatter) Enabled(level int) bool { - return level <= f.opts.Verbosity -} - -// GetDepth returns the current depth of this Formatter. This is useful for -// implementations which do their own caller attribution. -func (f Formatter) GetDepth() int { - return f.depth -} - -// FormatInfo renders an Info log message into strings. The prefix will be -// empty when no names were set (via AddNames), or when the output is -// configured for JSON. -func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (prefix, argsStr string) { - args := make([]interface{}, 0, 64) // using a constant here impacts perf - prefix = f.prefix - if f.outputFormat == outputJSON { - args = append(args, "logger", prefix) - prefix = "" - } - if f.opts.LogTimestamp { - args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat)) - } - if policy := f.opts.LogCaller; policy == All || policy == Info { - args = append(args, "caller", f.caller()) - } - args = append(args, "level", level, "msg", msg) - return prefix, f.render(args, kvList) -} - -// FormatError renders an Error log message into strings. The prefix will be -// empty when no names were set (via AddNames), or when the output is -// configured for JSON. -func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (prefix, argsStr string) { - args := make([]interface{}, 0, 64) // using a constant here impacts perf - prefix = f.prefix - if f.outputFormat == outputJSON { - args = append(args, "logger", prefix) - prefix = "" - } - if f.opts.LogTimestamp { - args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat)) - } - if policy := f.opts.LogCaller; policy == All || policy == Error { - args = append(args, "caller", f.caller()) - } - args = append(args, "msg", msg) - var loggableErr interface{} - if err != nil { - loggableErr = err.Error() - } - args = append(args, "error", loggableErr) - return f.prefix, f.render(args, kvList) -} - -// AddName appends the specified name. funcr uses '/' characters to separate -// name elements. Callers should not pass '/' in the provided name string, but -// this library does not actually enforce that. -func (f *Formatter) AddName(name string) { - if len(f.prefix) > 0 { - f.prefix += "/" - } - f.prefix += name -} - -// AddValues adds key-value pairs to the set of saved values to be logged with -// each log line. -func (f *Formatter) AddValues(kvList []interface{}) { - // Three slice args forces a copy. - n := len(f.values) - f.values = append(f.values[:n:n], kvList...) - - vals := f.values - if hook := f.opts.RenderValuesHook; hook != nil { - vals = hook(f.sanitize(vals)) - } - - // Pre-render values, so we don't have to do it on each Info/Error call. - buf := bytes.NewBuffer(make([]byte, 0, 1024)) - f.flatten(buf, vals, false, true) // escape user-provided keys - f.valuesStr = buf.String() -} - -// AddCallDepth increases the number of stack-frames to skip when attributing -// the log line to a file and line. -func (f *Formatter) AddCallDepth(depth int) { - f.depth += depth -} diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go index c3b56b3d2..e027aea3f 100644 --- a/vendor/github.com/go-logr/logr/logr.go +++ b/vendor/github.com/go-logr/logr/logr.go @@ -21,7 +21,7 @@ limitations under the License. // to back that API. Packages in the Go ecosystem can depend on this package, // while callers can implement logging with whatever backend is appropriate. // -// Usage +// # Usage // // Logging is done using a Logger instance. 
Logger is a concrete type with // methods, which defers the actual logging to a LogSink interface. The main @@ -30,16 +30,20 @@ limitations under the License. // "structured logging". // // With Go's standard log package, we might write: -// log.Printf("setting target value %s", targetValue) +// +// log.Printf("setting target value %s", targetValue) // // With logr's structured logging, we'd write: -// logger.Info("setting target", "value", targetValue) +// +// logger.Info("setting target", "value", targetValue) // // Errors are much the same. Instead of: -// log.Printf("failed to open the pod bay door for user %s: %v", user, err) +// +// log.Printf("failed to open the pod bay door for user %s: %v", user, err) // // We'd write: -// logger.Error(err, "failed to open the pod bay door", "user", user) +// +// logger.Error(err, "failed to open the pod bay door", "user", user) // // Info() and Error() are very similar, but they are separate methods so that // LogSink implementations can choose to do things like attach additional @@ -47,7 +51,7 @@ limitations under the License. // always logged, regardless of the current verbosity. If there is no error // instance available, passing nil is valid. // -// Verbosity +// # Verbosity // // Often we want to log information only when the application in "verbose // mode". To write log lines that are more verbose, Logger has a V() method. @@ -58,20 +62,22 @@ limitations under the License. // Error messages do not have a verbosity level and are always logged. // // Where we might have written: -// if flVerbose >= 2 { -// log.Printf("an unusual thing happened") -// } +// +// if flVerbose >= 2 { +// log.Printf("an unusual thing happened") +// } // // We can write: -// logger.V(2).Info("an unusual thing happened") // -// Logger Names +// logger.V(2).Info("an unusual thing happened") +// +// # Logger Names // // Logger instances can have name strings so that all messages logged through // that instance have additional context. For example, you might want to add // a subsystem name: // -// logger.WithName("compactor").Info("started", "time", time.Now()) +// logger.WithName("compactor").Info("started", "time", time.Now()) // // The WithName() method returns a new Logger, which can be passed to // constructors or other functions for further use. Repeated use of WithName() @@ -82,25 +88,27 @@ limitations under the License. // joining operation (e.g. whitespace, commas, periods, slashes, brackets, // quotes, etc). // -// Saved Values +// # Saved Values // // Logger instances can store any number of key/value pairs, which will be // logged alongside all messages logged through that instance. For example, // you might want to create a Logger instance per managed object: // // With the standard log package, we might write: -// log.Printf("decided to set field foo to value %q for object %s/%s", -// targetValue, object.Namespace, object.Name) +// +// log.Printf("decided to set field foo to value %q for object %s/%s", +// targetValue, object.Namespace, object.Name) // // With logr we'd write: -// // Elsewhere: set up the logger to log the object name. -// obj.logger = mainLogger.WithValues( -// "name", obj.name, "namespace", obj.namespace) // -// // later on... -// obj.logger.Info("setting foo", "value", targetValue) +// // Elsewhere: set up the logger to log the object name. +// obj.logger = mainLogger.WithValues( +// "name", obj.name, "namespace", obj.namespace) +// +// // later on... 
+// obj.logger.Info("setting foo", "value", targetValue) // -// Best Practices +// # Best Practices // // Logger has very few hard rules, with the goal that LogSink implementations // might have a lot of freedom to differentiate. There are, however, some @@ -124,15 +132,15 @@ limitations under the License. // around. For cases where passing a logger is optional, a pointer to Logger // should be used. // -// Key Naming Conventions +// # Key Naming Conventions // // Keys are not strictly required to conform to any specification or regex, but // it is recommended that they: -// * be human-readable and meaningful (not auto-generated or simple ordinals) -// * be constant (not dependent on input data) -// * contain only printable characters -// * not contain whitespace or punctuation -// * use lower case for simple keys and lowerCamelCase for more complex ones +// - be human-readable and meaningful (not auto-generated or simple ordinals) +// - be constant (not dependent on input data) +// - contain only printable characters +// - not contain whitespace or punctuation +// - use lower case for simple keys and lowerCamelCase for more complex ones // // These guidelines help ensure that log data is processed properly regardless // of the log implementation. For example, log implementations will try to @@ -141,51 +149,54 @@ limitations under the License. // While users are generally free to use key names of their choice, it's // generally best to avoid using the following keys, as they're frequently used // by implementations: -// * "caller": the calling information (file/line) of a particular log line -// * "error": the underlying error value in the `Error` method -// * "level": the log level -// * "logger": the name of the associated logger -// * "msg": the log message -// * "stacktrace": the stack trace associated with a particular log line or -// error (often from the `Error` message) -// * "ts": the timestamp for a log line +// - "caller": the calling information (file/line) of a particular log line +// - "error": the underlying error value in the `Error` method +// - "level": the log level +// - "logger": the name of the associated logger +// - "msg": the log message +// - "stacktrace": the stack trace associated with a particular log line or +// error (often from the `Error` message) +// - "ts": the timestamp for a log line // // Implementations are encouraged to make use of these keys to represent the // above concepts, when necessary (for example, in a pure-JSON output form, it // would be necessary to represent at least message and timestamp as ordinary // named values). // -// Break Glass +// # Break Glass // // Implementations may choose to give callers access to the underlying // logging implementation. The recommended pattern for this is: -// // Underlier exposes access to the underlying logging implementation. -// // Since callers only have a logr.Logger, they have to know which -// // implementation is in use, so this interface is less of an abstraction -// // and more of way to test type conversion. -// type Underlier interface { -// GetUnderlying() -// } +// +// // Underlier exposes access to the underlying logging implementation. +// // Since callers only have a logr.Logger, they have to know which +// // implementation is in use, so this interface is less of an abstraction +// // and more of way to test type conversion. 
+// type Underlier interface { +// GetUnderlying() +// } // // Logger grants access to the sink to enable type assertions like this: -// func DoSomethingWithImpl(log logr.Logger) { -// if underlier, ok := log.GetSink()(impl.Underlier) { -// implLogger := underlier.GetUnderlying() -// ... -// } -// } +// +// func DoSomethingWithImpl(log logr.Logger) { +// if underlier, ok := log.GetSink().(impl.Underlier); ok { +// implLogger := underlier.GetUnderlying() +// ... +// } +// } // // Custom `With*` functions can be implemented by copying the complete // Logger struct and replacing the sink in the copy: -// // WithFooBar changes the foobar parameter in the log sink and returns a -// // new logger with that modified sink. It does nothing for loggers where -// // the sink doesn't support that parameter. -// func WithFoobar(log logr.Logger, foobar int) logr.Logger { -// if foobarLogSink, ok := log.GetSink()(FoobarSink); ok { -// log = log.WithSink(foobarLogSink.WithFooBar(foobar)) -// } -// return log -// } +// +// // WithFooBar changes the foobar parameter in the log sink and returns a +// // new logger with that modified sink. It does nothing for loggers where +// // the sink doesn't support that parameter. +// func WithFoobar(log logr.Logger, foobar int) logr.Logger { +// if foobarLogSink, ok := log.GetSink().(FoobarSink); ok { +// log = log.WithSink(foobarLogSink.WithFooBar(foobar)) +// } +// return log +// } // // Don't use New to construct a new Logger with a LogSink retrieved from an // existing Logger. Source code attribution might not work correctly and @@ -201,11 +212,14 @@ import ( ) // New returns a new Logger instance. This is primarily used by libraries -// implementing LogSink, rather than end users. +// implementing LogSink, rather than end users. Passing a nil sink will create +// a Logger which discards all log lines. func New(sink LogSink) Logger { logger := Logger{} logger.setSink(sink) - sink.Init(runtimeInfo) + if sink != nil { + sink.Init(runtimeInfo) + } return logger } @@ -244,7 +258,7 @@ type Logger struct { // Enabled tests whether this Logger is enabled. For example, commandline // flags might be used to set the logging verbosity and disable some info logs. func (l Logger) Enabled() bool { - return l.sink.Enabled(l.level) + return l.sink != nil && l.sink.Enabled(l.level) } // Info logs a non-error message with the given key/value pairs as context. @@ -254,6 +268,9 @@ func (l Logger) Enabled() bool { // information. The key/value pairs must alternate string keys and arbitrary // values. func (l Logger) Info(msg string, keysAndValues ...interface{}) { + if l.sink == nil { + return + } if l.Enabled() { if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { withHelper.GetCallStackHelper()() @@ -273,6 +290,9 @@ func (l Logger) Info(msg string, keysAndValues ...interface{}) { // triggered this log line, if present. The err parameter is optional // and nil may be passed instead of an error instance. func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) { + if l.sink == nil { + return + } if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { withHelper.GetCallStackHelper()() } @@ -284,6 +304,9 @@ func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) { // level means a log message is less important. Negative V-levels are treated // as 0. 
func (l Logger) V(level int) Logger { + if l.sink == nil { + return l + } if level < 0 { level = 0 } @@ -294,6 +317,9 @@ func (l Logger) V(level int) Logger { // WithValues returns a new Logger instance with additional key/value pairs. // See Info for documentation on how key/value pairs work. func (l Logger) WithValues(keysAndValues ...interface{}) Logger { + if l.sink == nil { + return l + } l.setSink(l.sink.WithValues(keysAndValues...)) return l } @@ -304,6 +330,9 @@ func (l Logger) WithValues(keysAndValues ...interface{}) Logger { // contain only letters, digits, and hyphens (see the package documentation for // more information). func (l Logger) WithName(name string) Logger { + if l.sink == nil { + return l + } l.setSink(l.sink.WithName(name)) return l } @@ -324,6 +353,9 @@ func (l Logger) WithName(name string) Logger { // WithCallDepth(1) because it works with implementions that support the // CallDepthLogSink and/or CallStackHelperLogSink interfaces. func (l Logger) WithCallDepth(depth int) Logger { + if l.sink == nil { + return l + } if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { l.setSink(withCallDepth.WithCallDepth(depth)) } @@ -345,6 +377,9 @@ func (l Logger) WithCallDepth(depth int) Logger { // implementation does not support either of these, the original Logger will be // returned. func (l Logger) WithCallStackHelper() (func(), Logger) { + if l.sink == nil { + return func() {}, l + } var helper func() if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { l.setSink(withCallDepth.WithCallDepth(1)) @@ -357,6 +392,11 @@ func (l Logger) WithCallStackHelper() (func(), Logger) { return helper, l } +// IsZero returns true if this logger is an uninitialized zero value +func (l Logger) IsZero() bool { + return l.sink == nil +} + // contextKey is how we find Loggers in a context.Context. type contextKey struct{} @@ -442,7 +482,7 @@ type LogSink interface { WithName(name string) LogSink } -// CallDepthLogSink represents a Logger that knows how to climb the call stack +// CallDepthLogSink represents a LogSink that knows how to climb the call stack // to identify the original call site and can offset the depth by a specified // number of frames. This is useful for users who have helper functions // between the "real" call site and the actual calls to Logger methods. @@ -467,7 +507,7 @@ type CallDepthLogSink interface { WithCallDepth(depth int) LogSink } -// CallStackHelperLogSink represents a Logger that knows how to climb +// CallStackHelperLogSink represents a LogSink that knows how to climb // the call stack to identify the original call site and can skip // intermediate helper functions if they mark themselves as // helper. Go's testing package uses that approach. diff --git a/vendor/github.com/go-logr/stdr/LICENSE b/vendor/github.com/go-logr/stdr/LICENSE deleted file mode 100644 index 261eeb9e9..000000000 --- a/vendor/github.com/go-logr/stdr/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
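A minimal sketch (not part of the diff) of the behavior the updated logr.go hunk above introduces — nil-sink guards on every Logger method, Discard() reduced to New(nil), and the new IsZero() helper — assuming only the vendored github.com/go-logr/logr import:

package main

import (
	"errors"
	"fmt"

	"github.com/go-logr/logr"
)

func main() {
	// With the nil-sink guards in this version, a Logger built from a nil
	// sink is a safe no-op rather than a panic waiting to happen.
	log := logr.New(nil)

	log.Info("dropped", "key", "value")                 // no-op, no panic
	log.Error(errors.New("boom"), "also dropped")       // no-op, no panic
	log = log.WithName("sub").WithValues("k", "v").V(2) // still a no-op logger

	fmt.Println(log.Enabled()) // false: a nil sink is never enabled
	fmt.Println(log.IsZero())  // true: reports the missing sink
}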
diff --git a/vendor/github.com/go-logr/stdr/README.md b/vendor/github.com/go-logr/stdr/README.md deleted file mode 100644 index 515866789..000000000 --- a/vendor/github.com/go-logr/stdr/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# Minimal Go logging using logr and Go's standard library - -[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/stdr.svg)](https://pkg.go.dev/github.com/go-logr/stdr) - -This package implements the [logr interface](https://github.com/go-logr/logr) -in terms of Go's standard log package(https://pkg.go.dev/log). diff --git a/vendor/github.com/go-logr/stdr/stdr.go b/vendor/github.com/go-logr/stdr/stdr.go deleted file mode 100644 index 93a8aab51..000000000 --- a/vendor/github.com/go-logr/stdr/stdr.go +++ /dev/null @@ -1,170 +0,0 @@ -/* -Copyright 2019 The logr Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package stdr implements github.com/go-logr/logr.Logger in terms of -// Go's standard log package. -package stdr - -import ( - "log" - "os" - - "github.com/go-logr/logr" - "github.com/go-logr/logr/funcr" -) - -// The global verbosity level. See SetVerbosity(). -var globalVerbosity int - -// SetVerbosity sets the global level against which all info logs will be -// compared. If this is greater than or equal to the "V" of the logger, the -// message will be logged. A higher value here means more logs will be written. -// The previous verbosity value is returned. This is not concurrent-safe - -// callers must be sure to call it from only one goroutine. -func SetVerbosity(v int) int { - old := globalVerbosity - globalVerbosity = v - return old -} - -// New returns a logr.Logger which is implemented by Go's standard log package, -// or something like it. If std is nil, this will use a default logger -// instead. -// -// Example: stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))) -func New(std StdLogger) logr.Logger { - return NewWithOptions(std, Options{}) -} - -// NewWithOptions returns a logr.Logger which is implemented by Go's standard -// log package, or something like it. See New for details. -func NewWithOptions(std StdLogger, opts Options) logr.Logger { - if std == nil { - // Go's log.Default() is only available in 1.16 and higher. - std = log.New(os.Stderr, "", log.LstdFlags) - } - - if opts.Depth < 0 { - opts.Depth = 0 - } - - fopts := funcr.Options{ - LogCaller: funcr.MessageClass(opts.LogCaller), - } - - sl := &logger{ - Formatter: funcr.NewFormatter(fopts), - std: std, - } - - // For skipping our own logger.Info/Error. - sl.Formatter.AddCallDepth(1 + opts.Depth) - - return logr.New(sl) -} - -// Options carries parameters which influence the way logs are generated. -type Options struct { - // Depth biases the assumed number of call frames to the "true" caller. - // This is useful when the calling code calls a function which then calls - // stdr (e.g. a logging shim to another API). Values less than zero will - // be treated as zero. - Depth int - - // LogCaller tells stdr to add a "caller" key to some or all log lines. 
- // Go's log package has options to log this natively, too. - LogCaller MessageClass - - // TODO: add an option to log the date/time -} - -// MessageClass indicates which category or categories of messages to consider. -type MessageClass int - -const ( - // None ignores all message classes. - None MessageClass = iota - // All considers all message classes. - All - // Info only considers info messages. - Info - // Error only considers error messages. - Error -) - -// StdLogger is the subset of the Go stdlib log.Logger API that is needed for -// this adapter. -type StdLogger interface { - // Output is the same as log.Output and log.Logger.Output. - Output(calldepth int, logline string) error -} - -type logger struct { - funcr.Formatter - std StdLogger -} - -var _ logr.LogSink = &logger{} -var _ logr.CallDepthLogSink = &logger{} - -func (l logger) Enabled(level int) bool { - return globalVerbosity >= level -} - -func (l logger) Info(level int, msg string, kvList ...interface{}) { - prefix, args := l.FormatInfo(level, msg, kvList) - if prefix != "" { - args = prefix + ": " + args - } - _ = l.std.Output(l.Formatter.GetDepth()+1, args) -} - -func (l logger) Error(err error, msg string, kvList ...interface{}) { - prefix, args := l.FormatError(err, msg, kvList) - if prefix != "" { - args = prefix + ": " + args - } - _ = l.std.Output(l.Formatter.GetDepth()+1, args) -} - -func (l logger) WithName(name string) logr.LogSink { - l.Formatter.AddName(name) - return &l -} - -func (l logger) WithValues(kvList ...interface{}) logr.LogSink { - l.Formatter.AddValues(kvList) - return &l -} - -func (l logger) WithCallDepth(depth int) logr.LogSink { - l.Formatter.AddCallDepth(depth) - return &l -} - -// Underlier exposes access to the underlying logging implementation. Since -// callers only have a logr.Logger, they have to know which implementation is -// in use, so this interface is less of an abstraction and more of way to test -// type conversion. -type Underlier interface { - GetUnderlying() StdLogger -} - -// GetUnderlying returns the StdLogger underneath this logger. Since StdLogger -// is itself an interface, the result may or may not be a Go log.Logger. 
-func (l logger) GetUnderlying() StdLogger { - return l.std -} diff --git a/vendor/github.com/go-openapi/runtime/.editorconfig b/vendor/github.com/go-openapi/runtime/.editorconfig deleted file mode 100644 index 3152da69a..000000000 --- a/vendor/github.com/go-openapi/runtime/.editorconfig +++ /dev/null @@ -1,26 +0,0 @@ -# top-most EditorConfig file -root = true - -# Unix-style newlines with a newline ending every file -[*] -end_of_line = lf -insert_final_newline = true -indent_style = space -indent_size = 2 -trim_trailing_whitespace = true - -# Set default charset -[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] -charset = utf-8 - -# Tab indentation (no size specified) -[*.go] -indent_style = tab - -[*.md] -trim_trailing_whitespace = false - -# Matches the exact files either package.json or .travis.yml -[{package.json,.travis.yml}] -indent_style = space -indent_size = 2 diff --git a/vendor/github.com/go-openapi/runtime/.gitattributes b/vendor/github.com/go-openapi/runtime/.gitattributes deleted file mode 100644 index d207b1802..000000000 --- a/vendor/github.com/go-openapi/runtime/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -*.go text eol=lf diff --git a/vendor/github.com/go-openapi/runtime/.gitignore b/vendor/github.com/go-openapi/runtime/.gitignore deleted file mode 100644 index fea8b84ec..000000000 --- a/vendor/github.com/go-openapi/runtime/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -secrets.yml -coverage.out -*.cov -*.out -playground diff --git a/vendor/github.com/go-openapi/runtime/.golangci.yml b/vendor/github.com/go-openapi/runtime/.golangci.yml deleted file mode 100644 index b1aa7928a..000000000 --- a/vendor/github.com/go-openapi/runtime/.golangci.yml +++ /dev/null @@ -1,44 +0,0 @@ -linters-settings: - govet: - # Using err repeatedly considered as shadowing. - check-shadowing: false - golint: - min-confidence: 0 - gocyclo: - min-complexity: 30 - maligned: - suggest-new: true - dupl: - threshold: 100 - goconst: - min-len: 2 - min-occurrences: 4 -linters: - disable: - - maligned - - lll - - gochecknoglobals - - godox - - gocognit - - whitespace - - wsl - - funlen - - gochecknoglobals - - gochecknoinits - - scopelint - - wrapcheck - - exhaustivestruct - - exhaustive - - nlreturn - - testpackage - - gci - - gofumpt - - goerr113 - - gomnd - - tparallel - - nestif - - godot - - errorlint - - noctx - - interfacer - - nilerr diff --git a/vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065e..000000000 --- a/vendor/github.com/go-openapi/runtime/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. 
- -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. 
- -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/runtime/README.md b/vendor/github.com/go-openapi/runtime/README.md deleted file mode 100644 index 5b1ec6494..000000000 --- a/vendor/github.com/go-openapi/runtime/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# runtime [![Build Status](https://travis-ci.org/go-openapi/runtime.svg?branch=client-context)](https://travis-ci.org/go-openapi/runtime) [![codecov](https://codecov.io/gh/go-openapi/runtime/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/runtime) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) - -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/runtime/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/runtime?status.svg)](http://godoc.org/github.com/go-openapi/runtime) - -# golang Open-API toolkit - runtime - -The runtime component for use in codegeneration or as untyped usage. diff --git a/vendor/github.com/go-openapi/runtime/bytestream.go b/vendor/github.com/go-openapi/runtime/bytestream.go deleted file mode 100644 index 6eb6ceb5c..000000000 --- a/vendor/github.com/go-openapi/runtime/bytestream.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import ( - "bytes" - "encoding" - "errors" - "fmt" - "io" - "reflect" - - "github.com/go-openapi/swag" -) - -func defaultCloser() error { return nil } - -type byteStreamOpt func(opts *byteStreamOpts) - -// ClosesStream when the bytestream consumer or producer is finished -func ClosesStream(opts *byteStreamOpts) { - opts.Close = true -} - -type byteStreamOpts struct { - Close bool -} - -// ByteStreamConsumer creates a consumer for byte streams, -// takes a Writer/BinaryUnmarshaler interface or binary slice by reference, -// and reads from the provided reader -func ByteStreamConsumer(opts ...byteStreamOpt) Consumer { - var vals byteStreamOpts - for _, opt := range opts { - opt(&vals) - } - - return ConsumerFunc(func(reader io.Reader, data interface{}) error { - if reader == nil { - return errors.New("ByteStreamConsumer requires a reader") // early exit - } - - close := defaultCloser - if vals.Close { - if cl, ok := reader.(io.Closer); ok { - close = cl.Close - } - } - //nolint:errcheck // closing a reader wouldn't fail. 
- defer close() - - if wrtr, ok := data.(io.Writer); ok { - _, err := io.Copy(wrtr, reader) - return err - } - - buf := new(bytes.Buffer) - _, err := buf.ReadFrom(reader) - if err != nil { - return err - } - b := buf.Bytes() - - if bu, ok := data.(encoding.BinaryUnmarshaler); ok { - return bu.UnmarshalBinary(b) - } - - if data != nil { - if str, ok := data.(*string); ok { - *str = string(b) - return nil - } - } - - if t := reflect.TypeOf(data); data != nil && t.Kind() == reflect.Ptr { - v := reflect.Indirect(reflect.ValueOf(data)) - if t = v.Type(); t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8 { - v.SetBytes(b) - return nil - } - } - - return fmt.Errorf("%v (%T) is not supported by the ByteStreamConsumer, %s", - data, data, "can be resolved by supporting Writer/BinaryUnmarshaler interface") - }) -} - -// ByteStreamProducer creates a producer for byte streams, -// takes a Reader/BinaryMarshaler interface or binary slice, -// and writes to a writer (essentially a pipe) -func ByteStreamProducer(opts ...byteStreamOpt) Producer { - var vals byteStreamOpts - for _, opt := range opts { - opt(&vals) - } - return ProducerFunc(func(writer io.Writer, data interface{}) error { - if writer == nil { - return errors.New("ByteStreamProducer requires a writer") // early exit - } - close := defaultCloser - if vals.Close { - if cl, ok := writer.(io.Closer); ok { - close = cl.Close - } - } - //nolint:errcheck // TODO: closing a writer would fail. - defer close() - - if rc, ok := data.(io.ReadCloser); ok { - defer rc.Close() - } - - if rdr, ok := data.(io.Reader); ok { - _, err := io.Copy(writer, rdr) - return err - } - - if bm, ok := data.(encoding.BinaryMarshaler); ok { - bytes, err := bm.MarshalBinary() - if err != nil { - return err - } - - _, err = writer.Write(bytes) - return err - } - - if data != nil { - if str, ok := data.(string); ok { - _, err := writer.Write([]byte(str)) - return err - } - - if e, ok := data.(error); ok { - _, err := writer.Write([]byte(e.Error())) - return err - } - - v := reflect.Indirect(reflect.ValueOf(data)) - if t := v.Type(); t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8 { - _, err := writer.Write(v.Bytes()) - return err - } - if t := v.Type(); t.Kind() == reflect.Struct || t.Kind() == reflect.Slice { - b, err := swag.WriteJSON(data) - if err != nil { - return err - } - _, err = writer.Write(b) - return err - } - } - - return fmt.Errorf("%v (%T) is not supported by the ByteStreamProducer, %s", - data, data, "can be resolved by supporting Reader/BinaryMarshaler interface") - }) -} diff --git a/vendor/github.com/go-openapi/runtime/client/auth_info.go b/vendor/github.com/go-openapi/runtime/client/auth_info.go deleted file mode 100644 index 4f26e9234..000000000 --- a/vendor/github.com/go-openapi/runtime/client/auth_info.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package client - -import ( - "encoding/base64" - - "github.com/go-openapi/strfmt" - - "github.com/go-openapi/runtime" -) - -// PassThroughAuth never manipulates the request -var PassThroughAuth runtime.ClientAuthInfoWriter - -func init() { - PassThroughAuth = runtime.ClientAuthInfoWriterFunc(func(_ runtime.ClientRequest, _ strfmt.Registry) error { return nil }) -} - -// BasicAuth provides a basic auth info writer -func BasicAuth(username, password string) runtime.ClientAuthInfoWriter { - return runtime.ClientAuthInfoWriterFunc(func(r runtime.ClientRequest, _ strfmt.Registry) error { - encoded := base64.StdEncoding.EncodeToString([]byte(username + ":" + password)) - return r.SetHeaderParam(runtime.HeaderAuthorization, "Basic "+encoded) - }) -} - -// APIKeyAuth provides an API key auth info writer -func APIKeyAuth(name, in, value string) runtime.ClientAuthInfoWriter { - if in == "query" { - return runtime.ClientAuthInfoWriterFunc(func(r runtime.ClientRequest, _ strfmt.Registry) error { - return r.SetQueryParam(name, value) - }) - } - - if in == "header" { - return runtime.ClientAuthInfoWriterFunc(func(r runtime.ClientRequest, _ strfmt.Registry) error { - return r.SetHeaderParam(name, value) - }) - } - return nil -} - -// BearerToken provides a header based oauth2 bearer access token auth info writer -func BearerToken(token string) runtime.ClientAuthInfoWriter { - return runtime.ClientAuthInfoWriterFunc(func(r runtime.ClientRequest, _ strfmt.Registry) error { - return r.SetHeaderParam(runtime.HeaderAuthorization, "Bearer "+token) - }) -} - -// Compose combines multiple ClientAuthInfoWriters into a single one. -// Useful when multiple auth headers are needed. -func Compose(auths ...runtime.ClientAuthInfoWriter) runtime.ClientAuthInfoWriter { - return runtime.ClientAuthInfoWriterFunc(func(r runtime.ClientRequest, _ strfmt.Registry) error { - for _, auth := range auths { - if auth == nil { - continue - } - if err := auth.AuthenticateRequest(r, nil); err != nil { - return err - } - } - return nil - }) -} diff --git a/vendor/github.com/go-openapi/runtime/client/keepalive.go b/vendor/github.com/go-openapi/runtime/client/keepalive.go deleted file mode 100644 index bc7b7fa41..000000000 --- a/vendor/github.com/go-openapi/runtime/client/keepalive.go +++ /dev/null @@ -1,55 +0,0 @@ -package client - -import ( - "io" - "net/http" - "sync/atomic" -) - -// KeepAliveTransport drains the remaining body from a response -// so that go will reuse the TCP connections. -// This is not enabled by default because there are servers where -// the response never gets closed and that would make the code hang forever. -// So instead it's provided as a http client middleware that can be used to override -// any request. 
-func KeepAliveTransport(rt http.RoundTripper) http.RoundTripper { - return &keepAliveTransport{wrapped: rt} -} - -type keepAliveTransport struct { - wrapped http.RoundTripper -} - -func (k *keepAliveTransport) RoundTrip(r *http.Request) (*http.Response, error) { - resp, err := k.wrapped.RoundTrip(r) - if err != nil { - return resp, err - } - resp.Body = &drainingReadCloser{rdr: resp.Body} - return resp, nil -} - -type drainingReadCloser struct { - rdr io.ReadCloser - seenEOF uint32 -} - -func (d *drainingReadCloser) Read(p []byte) (n int, err error) { - n, err = d.rdr.Read(p) - if err == io.EOF || n == 0 { - atomic.StoreUint32(&d.seenEOF, 1) - } - return -} - -func (d *drainingReadCloser) Close() error { - // drain buffer - if atomic.LoadUint32(&d.seenEOF) != 1 { - // If the reader side (a HTTP server) is misbehaving, it still may send - // some bytes, but the closer ignores them to keep the underling - // connection open. - //nolint:errcheck - io.Copy(io.Discard, d.rdr) - } - return d.rdr.Close() -} diff --git a/vendor/github.com/go-openapi/runtime/client/opentelemetry.go b/vendor/github.com/go-openapi/runtime/client/opentelemetry.go deleted file mode 100644 index 8a38ea3e9..000000000 --- a/vendor/github.com/go-openapi/runtime/client/opentelemetry.go +++ /dev/null @@ -1,207 +0,0 @@ -package client - -import ( - "fmt" - "net/http" - "strings" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/strfmt" - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/propagation" - semconv "go.opentelemetry.io/otel/semconv/v1.12.0" - "go.opentelemetry.io/otel/trace" -) - -const ( - instrumentationVersion = "1.0.0" - tracerName = "go-openapi" -) - -type config struct { - Tracer trace.Tracer - Propagator propagation.TextMapPropagator - SpanStartOptions []trace.SpanStartOption - SpanNameFormatter func(*runtime.ClientOperation) string - TracerProvider trace.TracerProvider -} - -type OpenTelemetryOpt interface { - apply(*config) -} - -type optionFunc func(*config) - -func (o optionFunc) apply(c *config) { - o(c) -} - -// WithTracerProvider specifies a tracer provider to use for creating a tracer. -// If none is specified, the global provider is used. -func WithTracerProvider(provider trace.TracerProvider) OpenTelemetryOpt { - return optionFunc(func(c *config) { - if provider != nil { - c.TracerProvider = provider - } - }) -} - -// WithPropagators configures specific propagators. If this -// option isn't specified, then the global TextMapPropagator is used. -func WithPropagators(ps propagation.TextMapPropagator) OpenTelemetryOpt { - return optionFunc(func(c *config) { - if ps != nil { - c.Propagator = ps - } - }) -} - -// WithSpanOptions configures an additional set of -// trace.SpanOptions, which are applied to each new span. -func WithSpanOptions(opts ...trace.SpanStartOption) OpenTelemetryOpt { - return optionFunc(func(c *config) { - c.SpanStartOptions = append(c.SpanStartOptions, opts...) - }) -} - -// WithSpanNameFormatter takes a function that will be called on every -// request and the returned string will become the Span Name. 
-func WithSpanNameFormatter(f func(op *runtime.ClientOperation) string) OpenTelemetryOpt { - return optionFunc(func(c *config) { - c.SpanNameFormatter = f - }) -} - -func defaultTransportFormatter(op *runtime.ClientOperation) string { - if op.ID != "" { - return op.ID - } - - return fmt.Sprintf("%s_%s", strings.ToLower(op.Method), op.PathPattern) -} - -type openTelemetryTransport struct { - transport runtime.ClientTransport - host string - tracer trace.Tracer - config *config -} - -func newOpenTelemetryTransport(transport runtime.ClientTransport, host string, opts []OpenTelemetryOpt) *openTelemetryTransport { - tr := &openTelemetryTransport{ - transport: transport, - host: host, - } - - defaultOpts := []OpenTelemetryOpt{ - WithSpanOptions(trace.WithSpanKind(trace.SpanKindClient)), - WithSpanNameFormatter(defaultTransportFormatter), - WithPropagators(otel.GetTextMapPropagator()), - WithTracerProvider(otel.GetTracerProvider()), - } - - c := newConfig(append(defaultOpts, opts...)...) - tr.config = c - - return tr -} - -func (t *openTelemetryTransport) Submit(op *runtime.ClientOperation) (interface{}, error) { - if op.Context == nil { - return t.transport.Submit(op) - } - - params := op.Params - reader := op.Reader - - var span trace.Span - defer func() { - if span != nil { - span.End() - } - }() - - op.Params = runtime.ClientRequestWriterFunc(func(req runtime.ClientRequest, reg strfmt.Registry) error { - span = t.newOpenTelemetrySpan(op, req.GetHeaderParams()) - return params.WriteToRequest(req, reg) - }) - - op.Reader = runtime.ClientResponseReaderFunc(func(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - if span != nil { - statusCode := response.Code() - span.SetAttributes(attribute.Int(string(semconv.HTTPStatusCodeKey), statusCode)) - span.SetStatus(semconv.SpanStatusFromHTTPStatusCodeAndSpanKind(statusCode, trace.SpanKindClient)) - } - - return reader.ReadResponse(response, consumer) - }) - - submit, err := t.transport.Submit(op) - if err != nil && span != nil { - span.RecordError(err) - span.SetStatus(codes.Error, err.Error()) - } - - return submit, err -} - -func (t *openTelemetryTransport) newOpenTelemetrySpan(op *runtime.ClientOperation, header http.Header) trace.Span { - ctx := op.Context - - tracer := t.tracer - if tracer == nil { - if span := trace.SpanFromContext(ctx); span.SpanContext().IsValid() { - tracer = newTracer(span.TracerProvider()) - } else { - tracer = newTracer(otel.GetTracerProvider()) - } - } - - ctx, span := tracer.Start(ctx, t.config.SpanNameFormatter(op), t.config.SpanStartOptions...) - - var scheme string - if len(op.Schemes) > 0 { - scheme = op.Schemes[0] - } - - span.SetAttributes( - attribute.String("net.peer.name", t.host), - attribute.String(string(semconv.HTTPRouteKey), op.PathPattern), - attribute.String(string(semconv.HTTPMethodKey), op.Method), - attribute.String("span.kind", trace.SpanKindClient.String()), - attribute.String("http.scheme", scheme), - ) - - carrier := propagation.HeaderCarrier(header) - t.config.Propagator.Inject(ctx, carrier) - - return span -} - -func newTracer(tp trace.TracerProvider) trace.Tracer { - return tp.Tracer(tracerName, trace.WithInstrumentationVersion(version())) -} - -func newConfig(opts ...OpenTelemetryOpt) *config { - c := &config{ - Propagator: otel.GetTextMapPropagator(), - } - - for _, opt := range opts { - opt.apply(c) - } - - // Tracer is only initialized if manually specified. Otherwise, can be passed with the tracing context. 
- if c.TracerProvider != nil { - c.Tracer = newTracer(c.TracerProvider) - } - - return c -} - -// Version is the current release version of the go-runtime instrumentation. -func version() string { - return instrumentationVersion -} diff --git a/vendor/github.com/go-openapi/runtime/client/opentracing.go b/vendor/github.com/go-openapi/runtime/client/opentracing.go deleted file mode 100644 index 627286d12..000000000 --- a/vendor/github.com/go-openapi/runtime/client/opentracing.go +++ /dev/null @@ -1,99 +0,0 @@ -package client - -import ( - "fmt" - "net/http" - - "github.com/go-openapi/strfmt" - "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/ext" - "github.com/opentracing/opentracing-go/log" - - "github.com/go-openapi/runtime" -) - -type tracingTransport struct { - transport runtime.ClientTransport - host string - opts []opentracing.StartSpanOption -} - -func newOpenTracingTransport(transport runtime.ClientTransport, host string, opts []opentracing.StartSpanOption, -) runtime.ClientTransport { - return &tracingTransport{ - transport: transport, - host: host, - opts: opts, - } -} - -func (t *tracingTransport) Submit(op *runtime.ClientOperation) (interface{}, error) { - if op.Context == nil { - return t.transport.Submit(op) - } - - params := op.Params - reader := op.Reader - - var span opentracing.Span - defer func() { - if span != nil { - span.Finish() - } - }() - - op.Params = runtime.ClientRequestWriterFunc(func(req runtime.ClientRequest, reg strfmt.Registry) error { - span = createClientSpan(op, req.GetHeaderParams(), t.host, t.opts) - return params.WriteToRequest(req, reg) - }) - - op.Reader = runtime.ClientResponseReaderFunc(func(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { - if span != nil { - code := response.Code() - ext.HTTPStatusCode.Set(span, uint16(code)) - if code >= 400 { - ext.Error.Set(span, true) - } - } - return reader.ReadResponse(response, consumer) - }) - - submit, err := t.transport.Submit(op) - if err != nil && span != nil { - ext.Error.Set(span, true) - span.LogFields(log.Error(err)) - } - return submit, err -} - -func createClientSpan(op *runtime.ClientOperation, header http.Header, host string, - opts []opentracing.StartSpanOption) opentracing.Span { - ctx := op.Context - span := opentracing.SpanFromContext(ctx) - - if span != nil { - opts = append(opts, ext.SpanKindRPCClient) - span, _ = opentracing.StartSpanFromContextWithTracer( - ctx, span.Tracer(), operationName(op), opts...) - - ext.Component.Set(span, "go-openapi") - ext.PeerHostname.Set(span, host) - span.SetTag("http.path", op.PathPattern) - ext.HTTPMethod.Set(span, op.Method) - - _ = span.Tracer().Inject( - span.Context(), - opentracing.HTTPHeaders, - opentracing.HTTPHeadersCarrier(header)) - - return span - } - return nil -} - -func operationName(op *runtime.ClientOperation) string { - if op.ID != "" { - return op.ID - } - return fmt.Sprintf("%s_%s", op.Method, op.PathPattern) -} diff --git a/vendor/github.com/go-openapi/runtime/client/request.go b/vendor/github.com/go-openapi/runtime/client/request.go deleted file mode 100644 index d7dc564e9..000000000 --- a/vendor/github.com/go-openapi/runtime/client/request.go +++ /dev/null @@ -1,474 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "bytes" - "fmt" - "io" - "log" - "mime/multipart" - "net/http" - "net/textproto" - "net/url" - "os" - "path" - "path/filepath" - "strings" - "time" - - "github.com/go-openapi/strfmt" - - "github.com/go-openapi/runtime" -) - -// NewRequest creates a new swagger http client request -func newRequest(method, pathPattern string, writer runtime.ClientRequestWriter) (*request, error) { - return &request{ - pathPattern: pathPattern, - method: method, - writer: writer, - header: make(http.Header), - query: make(url.Values), - timeout: DefaultTimeout, - getBody: getRequestBuffer, - }, nil -} - -// Request represents a swagger client request. -// -// This Request struct converts to a HTTP request. -// There might be others that convert to other transports. -// There is no error checking here, it is assumed to be used after a spec has been validated. -// so impossible combinations should not arise (hopefully). -// -// The main purpose of this struct is to hide the machinery of adding params to a transport request. -// The generated code only implements what is necessary to turn a param into a valid value for these methods. -type request struct { - pathPattern string - method string - writer runtime.ClientRequestWriter - - pathParams map[string]string - header http.Header - query url.Values - formFields url.Values - fileFields map[string][]runtime.NamedReadCloser - payload interface{} - timeout time.Duration - buf *bytes.Buffer - - getBody func(r *request) []byte -} - -var ( - // ensure interface compliance - _ runtime.ClientRequest = new(request) -) - -func (r *request) isMultipart(mediaType string) bool { - if len(r.fileFields) > 0 { - return true - } - - return runtime.MultipartFormMime == mediaType -} - -// BuildHTTP creates a new http request based on the data from the params -func (r *request) BuildHTTP(mediaType, basePath string, producers map[string]runtime.Producer, registry strfmt.Registry) (*http.Request, error) { - return r.buildHTTP(mediaType, basePath, producers, registry, nil) -} -func escapeQuotes(s string) string { - return strings.NewReplacer("\\", "\\\\", `"`, "\\\"").Replace(s) -} - -func logClose(err error, pw *io.PipeWriter) { - log.Println(err) - closeErr := pw.CloseWithError(err) - if closeErr != nil { - log.Println(closeErr) - } -} - -func (r *request) buildHTTP(mediaType, basePath string, producers map[string]runtime.Producer, registry strfmt.Registry, auth runtime.ClientAuthInfoWriter) (*http.Request, error) { - // build the data - if err := r.writer.WriteToRequest(r, registry); err != nil { - return nil, err - } - - // Our body must be an io.Reader. - // When we create the http.Request, if we pass it a - // bytes.Buffer then it will wrap it in an io.ReadCloser - // and set the content length automatically. 
- var body io.Reader - var pr *io.PipeReader - var pw *io.PipeWriter - - r.buf = bytes.NewBuffer(nil) - if r.payload != nil || len(r.formFields) > 0 || len(r.fileFields) > 0 { - body = r.buf - if r.isMultipart(mediaType) { - pr, pw = io.Pipe() - body = pr - } - } - - // check if this is a form type request - if len(r.formFields) > 0 || len(r.fileFields) > 0 { - if !r.isMultipart(mediaType) { - r.header.Set(runtime.HeaderContentType, mediaType) - formString := r.formFields.Encode() - r.buf.WriteString(formString) - goto DoneChoosingBodySource - } - - mp := multipart.NewWriter(pw) - r.header.Set(runtime.HeaderContentType, mangleContentType(mediaType, mp.Boundary())) - - go func() { - defer func() { - mp.Close() - pw.Close() - }() - - for fn, v := range r.formFields { - for _, vi := range v { - if err := mp.WriteField(fn, vi); err != nil { - logClose(err, pw) - return - } - } - } - - defer func() { - for _, ff := range r.fileFields { - for _, ffi := range ff { - ffi.Close() - } - } - }() - for fn, f := range r.fileFields { - for _, fi := range f { - // Need to read the data so that we can detect the content type - buf := make([]byte, 512) - size, err := fi.Read(buf) - if err != nil { - logClose(err, pw) - return - } - fileContentType := http.DetectContentType(buf) - newFi := runtime.NamedReader(fi.Name(), io.MultiReader(bytes.NewReader(buf[:size]), fi)) - - // Create the MIME headers for the new part - h := make(textproto.MIMEHeader) - h.Set("Content-Disposition", - fmt.Sprintf(`form-data; name="%s"; filename="%s"`, - escapeQuotes(fn), escapeQuotes(filepath.Base(fi.Name())))) - h.Set("Content-Type", fileContentType) - - wrtr, err := mp.CreatePart(h) - if err != nil { - logClose(err, pw) - return - } - if _, err := io.Copy(wrtr, newFi); err != nil { - logClose(err, pw) - } - } - } - }() - - goto DoneChoosingBodySource - } - - // if there is payload, use the producer to write the payload, and then - // set the header to the content-type appropriate for the payload produced - if r.payload != nil { - // TODO: infer most appropriate content type based on the producer used, - // and the `consumers` section of the spec/operation - r.header.Set(runtime.HeaderContentType, mediaType) - if rdr, ok := r.payload.(io.ReadCloser); ok { - body = rdr - goto DoneChoosingBodySource - } - - if rdr, ok := r.payload.(io.Reader); ok { - body = rdr - goto DoneChoosingBodySource - } - - producer := producers[mediaType] - if err := producer.Produce(r.buf, r.payload); err != nil { - return nil, err - } - } - -DoneChoosingBodySource: - - if runtime.CanHaveBody(r.method) && body != nil && r.header.Get(runtime.HeaderContentType) == "" { - r.header.Set(runtime.HeaderContentType, mediaType) - } - - if auth != nil { - // If we're not using r.buf as our http.Request's body, - // either the payload is an io.Reader or io.ReadCloser, - // or we're doing a multipart form/file. - // - // In those cases, if the AuthenticateRequest call asks for the body, - // we must read it into a buffer and provide that, then use that buffer - // as the body of our http.Request. - // - // This is done in-line with the GetBody() request rather than ahead - // of time, because there's no way to know if the AuthenticateRequest - // will even ask for the body of the request. - // - // If for some reason the copy fails, there's no way to return that - // error to the GetBody() call, so return it afterwards. 
- // - // An error from the copy action is prioritized over any error - // from the AuthenticateRequest call, because the mis-read - // body may have interfered with the auth. - // - var copyErr error - if buf, ok := body.(*bytes.Buffer); body != nil && (!ok || buf != r.buf) { - var copied bool - r.getBody = func(r *request) []byte { - if copied { - return getRequestBuffer(r) - } - - defer func() { - copied = true - }() - - if _, copyErr = io.Copy(r.buf, body); copyErr != nil { - return nil - } - - if closer, ok := body.(io.ReadCloser); ok { - if copyErr = closer.Close(); copyErr != nil { - return nil - } - } - - body = r.buf - return getRequestBuffer(r) - } - } - - authErr := auth.AuthenticateRequest(r, registry) - - if copyErr != nil { - return nil, fmt.Errorf("error retrieving the response body: %v", copyErr) - } - - if authErr != nil { - return nil, authErr - } - } - - // In case the basePath or the request pathPattern include static query parameters, - // parse those out before constructing the final path. The parameters themselves - // will be merged with the ones set by the client, with the priority given first to - // the ones set by the client, then the path pattern, and lastly the base path. - basePathURL, err := url.Parse(basePath) - if err != nil { - return nil, err - } - staticQueryParams := basePathURL.Query() - - pathPatternURL, err := url.Parse(r.pathPattern) - if err != nil { - return nil, err - } - for name, values := range pathPatternURL.Query() { - if _, present := staticQueryParams[name]; present { - staticQueryParams.Del(name) - } - for _, value := range values { - staticQueryParams.Add(name, value) - } - } - - // create http request - var reinstateSlash bool - if pathPatternURL.Path != "" && pathPatternURL.Path != "/" && pathPatternURL.Path[len(pathPatternURL.Path)-1] == '/' { - reinstateSlash = true - } - - urlPath := path.Join(basePathURL.Path, pathPatternURL.Path) - for k, v := range r.pathParams { - urlPath = strings.Replace(urlPath, "{"+k+"}", url.PathEscape(v), -1) - } - if reinstateSlash { - urlPath = urlPath + "/" - } - - req, err := http.NewRequest(r.method, urlPath, body) - if err != nil { - return nil, err - } - - originalParams := r.GetQueryParams() - - // Merge the query parameters extracted from the basePath with the ones set by - // the client in this struct. In case of conflict, the client wins. - for k, v := range staticQueryParams { - _, present := originalParams[k] - if !present { - if err = r.SetQueryParam(k, v...); err != nil { - return nil, err - } - } - } - - req.URL.RawQuery = r.query.Encode() - req.Header = r.header - - return req, nil -} - -func mangleContentType(mediaType, boundary string) string { - if strings.ToLower(mediaType) == runtime.URLencodedFormMime { - return fmt.Sprintf("%s; boundary=%s", mediaType, boundary) - } - return "multipart/form-data; boundary=" + boundary -} - -func (r *request) GetMethod() string { - return r.method -} - -func (r *request) GetPath() string { - path := r.pathPattern - for k, v := range r.pathParams { - path = strings.Replace(path, "{"+k+"}", v, -1) - } - return path -} - -func (r *request) GetBody() []byte { - return r.getBody(r) -} - -func getRequestBuffer(r *request) []byte { - if r.buf == nil { - return nil - } - return r.buf.Bytes() -} - -// SetHeaderParam adds a header param to the request -// when there is only 1 value provided for the varargs, it will set it. 
-// when there are several values provided for the varargs it will add it (no overriding) -func (r *request) SetHeaderParam(name string, values ...string) error { - if r.header == nil { - r.header = make(http.Header) - } - r.header[http.CanonicalHeaderKey(name)] = values - return nil -} - -// GetHeaderParams returns the all headers currently set for the request -func (r *request) GetHeaderParams() http.Header { - return r.header -} - -// SetQueryParam adds a query param to the request -// when there is only 1 value provided for the varargs, it will set it. -// when there are several values provided for the varargs it will add it (no overriding) -func (r *request) SetQueryParam(name string, values ...string) error { - if r.query == nil { - r.query = make(url.Values) - } - r.query[name] = values - return nil -} - -// GetQueryParams returns a copy of all query params currently set for the request -func (r *request) GetQueryParams() url.Values { - var result = make(url.Values) - for key, value := range r.query { - result[key] = append([]string{}, value...) - } - return result -} - -// SetFormParam adds a forn param to the request -// when there is only 1 value provided for the varargs, it will set it. -// when there are several values provided for the varargs it will add it (no overriding) -func (r *request) SetFormParam(name string, values ...string) error { - if r.formFields == nil { - r.formFields = make(url.Values) - } - r.formFields[name] = values - return nil -} - -// SetPathParam adds a path param to the request -func (r *request) SetPathParam(name string, value string) error { - if r.pathParams == nil { - r.pathParams = make(map[string]string) - } - - r.pathParams[name] = value - return nil -} - -// SetFileParam adds a file param to the request -func (r *request) SetFileParam(name string, files ...runtime.NamedReadCloser) error { - for _, file := range files { - if actualFile, ok := file.(*os.File); ok { - fi, err := os.Stat(actualFile.Name()) - if err != nil { - return err - } - if fi.IsDir() { - return fmt.Errorf("%q is a directory, only files are supported", file.Name()) - } - } - } - - if r.fileFields == nil { - r.fileFields = make(map[string][]runtime.NamedReadCloser) - } - if r.formFields == nil { - r.formFields = make(url.Values) - } - - r.fileFields[name] = files - return nil -} - -func (r *request) GetFileParam() map[string][]runtime.NamedReadCloser { - return r.fileFields -} - -// SetBodyParam sets a body parameter on the request. -// This does not yet serialze the object, this happens as late as possible. -func (r *request) SetBodyParam(payload interface{}) error { - r.payload = payload - return nil -} - -func (r *request) GetBodyParam() interface{} { - return r.payload -} - -// SetTimeout sets the timeout for a request -func (r *request) SetTimeout(timeout time.Duration) error { - r.timeout = timeout - return nil -} diff --git a/vendor/github.com/go-openapi/runtime/client/response.go b/vendor/github.com/go-openapi/runtime/client/response.go deleted file mode 100644 index 0bbd388bc..000000000 --- a/vendor/github.com/go-openapi/runtime/client/response.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "io" - "net/http" - - "github.com/go-openapi/runtime" -) - -var _ runtime.ClientResponse = response{} - -func newResponse(resp *http.Response) runtime.ClientResponse { return response{resp: resp} } - -type response struct { - resp *http.Response -} - -func (r response) Code() int { - return r.resp.StatusCode -} - -func (r response) Message() string { - return r.resp.Status -} - -func (r response) GetHeader(name string) string { - return r.resp.Header.Get(name) -} - -func (r response) GetHeaders(name string) []string { - return r.resp.Header.Values(name) -} - -func (r response) Body() io.ReadCloser { - return r.resp.Body -} diff --git a/vendor/github.com/go-openapi/runtime/client/runtime.go b/vendor/github.com/go-openapi/runtime/client/runtime.go deleted file mode 100644 index ccec04138..000000000 --- a/vendor/github.com/go-openapi/runtime/client/runtime.go +++ /dev/null @@ -1,537 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "context" - "crypto" - "crypto/ecdsa" - "crypto/rsa" - "crypto/tls" - "crypto/x509" - "encoding/pem" - "fmt" - "mime" - "net/http" - "net/http/httputil" - "os" - "strings" - "sync" - "time" - - "github.com/opentracing/opentracing-go" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/runtime/logger" - "github.com/go-openapi/runtime/middleware" - "github.com/go-openapi/runtime/yamlpc" - "github.com/go-openapi/strfmt" -) - -// TLSClientOptions to configure client authentication with mutual TLS -type TLSClientOptions struct { - // Certificate is the path to a PEM-encoded certificate to be used for - // client authentication. If set then Key must also be set. - Certificate string - - // LoadedCertificate is the certificate to be used for client authentication. - // This field is ignored if Certificate is set. If this field is set, LoadedKey - // is also required. - LoadedCertificate *x509.Certificate - - // Key is the path to an unencrypted PEM-encoded private key for client - // authentication. This field is required if Certificate is set. - Key string - - // LoadedKey is the key for client authentication. This field is required if - // LoadedCertificate is set. - LoadedKey crypto.PrivateKey - - // CA is a path to a PEM-encoded certificate that specifies the root certificate - // to use when validating the TLS certificate presented by the server. If this field - // (and LoadedCA) is not set, the system certificate pool is used. This field is ignored if LoadedCA - // is set. 
- CA string - - // LoadedCA specifies the root certificate to use when validating the server's TLS certificate. - // If this field (and CA) is not set, the system certificate pool is used. - LoadedCA *x509.Certificate - - // LoadedCAPool specifies a pool of RootCAs to use when validating the server's TLS certificate. - // If set, it will be combined with the the other loaded certificates (see LoadedCA and CA). - // If neither LoadedCA or CA is set, the provided pool with override the system - // certificate pool. - // The caller must not use the supplied pool after calling TLSClientAuth. - LoadedCAPool *x509.CertPool - - // ServerName specifies the hostname to use when verifying the server certificate. - // If this field is set then InsecureSkipVerify will be ignored and treated as - // false. - ServerName string - - // InsecureSkipVerify controls whether the certificate chain and hostname presented - // by the server are validated. If true, any certificate is accepted. - InsecureSkipVerify bool - - // VerifyPeerCertificate, if not nil, is called after normal - // certificate verification. It receives the raw ASN.1 certificates - // provided by the peer and also any verified chains that normal processing found. - // If it returns a non-nil error, the handshake is aborted and that error results. - // - // If normal verification fails then the handshake will abort before - // considering this callback. If normal verification is disabled by - // setting InsecureSkipVerify then this callback will be considered but - // the verifiedChains argument will always be nil. - VerifyPeerCertificate func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error - - // SessionTicketsDisabled may be set to true to disable session ticket and - // PSK (resumption) support. Note that on clients, session ticket support is - // also disabled if ClientSessionCache is nil. - SessionTicketsDisabled bool - - // ClientSessionCache is a cache of ClientSessionState entries for TLS - // session resumption. It is only used by clients. - ClientSessionCache tls.ClientSessionCache - - // Prevents callers using unkeyed fields. 
- _ struct{} -} - -// TLSClientAuth creates a tls.Config for mutual auth -func TLSClientAuth(opts TLSClientOptions) (*tls.Config, error) { - // create client tls config - cfg := &tls.Config{} - - // load client cert if specified - if opts.Certificate != "" { - cert, err := tls.LoadX509KeyPair(opts.Certificate, opts.Key) - if err != nil { - return nil, fmt.Errorf("tls client cert: %v", err) - } - cfg.Certificates = []tls.Certificate{cert} - } else if opts.LoadedCertificate != nil { - block := pem.Block{Type: "CERTIFICATE", Bytes: opts.LoadedCertificate.Raw} - certPem := pem.EncodeToMemory(&block) - - var keyBytes []byte - switch k := opts.LoadedKey.(type) { - case *rsa.PrivateKey: - keyBytes = x509.MarshalPKCS1PrivateKey(k) - case *ecdsa.PrivateKey: - var err error - keyBytes, err = x509.MarshalECPrivateKey(k) - if err != nil { - return nil, fmt.Errorf("tls client priv key: %v", err) - } - default: - return nil, fmt.Errorf("tls client priv key: unsupported key type") - } - - block = pem.Block{Type: "PRIVATE KEY", Bytes: keyBytes} - keyPem := pem.EncodeToMemory(&block) - - cert, err := tls.X509KeyPair(certPem, keyPem) - if err != nil { - return nil, fmt.Errorf("tls client cert: %v", err) - } - cfg.Certificates = []tls.Certificate{cert} - } - - cfg.InsecureSkipVerify = opts.InsecureSkipVerify - - cfg.VerifyPeerCertificate = opts.VerifyPeerCertificate - cfg.SessionTicketsDisabled = opts.SessionTicketsDisabled - cfg.ClientSessionCache = opts.ClientSessionCache - - // When no CA certificate is provided, default to the system cert pool - // that way when a request is made to a server known by the system trust store, - // the name is still verified - if opts.LoadedCA != nil { - caCertPool := basePool(opts.LoadedCAPool) - caCertPool.AddCert(opts.LoadedCA) - cfg.RootCAs = caCertPool - } else if opts.CA != "" { - // load ca cert - caCert, err := os.ReadFile(opts.CA) - if err != nil { - return nil, fmt.Errorf("tls client ca: %v", err) - } - caCertPool := basePool(opts.LoadedCAPool) - caCertPool.AppendCertsFromPEM(caCert) - cfg.RootCAs = caCertPool - } else if opts.LoadedCAPool != nil { - cfg.RootCAs = opts.LoadedCAPool - } - - // apply servername overrride - if opts.ServerName != "" { - cfg.InsecureSkipVerify = false - cfg.ServerName = opts.ServerName - } - - return cfg, nil -} - -func basePool(pool *x509.CertPool) *x509.CertPool { - if pool == nil { - return x509.NewCertPool() - } - return pool -} - -// TLSTransport creates a http client transport suitable for mutual tls auth -func TLSTransport(opts TLSClientOptions) (http.RoundTripper, error) { - cfg, err := TLSClientAuth(opts) - if err != nil { - return nil, err - } - - return &http.Transport{TLSClientConfig: cfg}, nil -} - -// TLSClient creates a http.Client for mutual auth -func TLSClient(opts TLSClientOptions) (*http.Client, error) { - transport, err := TLSTransport(opts) - if err != nil { - return nil, err - } - return &http.Client{Transport: transport}, nil -} - -// DefaultTimeout the default request timeout -var DefaultTimeout = 30 * time.Second - -// Runtime represents an API client that uses the transport -// to make http requests based on a swagger specification. 
-type Runtime struct { - DefaultMediaType string - DefaultAuthentication runtime.ClientAuthInfoWriter - Consumers map[string]runtime.Consumer - Producers map[string]runtime.Producer - - Transport http.RoundTripper - Jar http.CookieJar - // Spec *spec.Document - Host string - BasePath string - Formats strfmt.Registry - Context context.Context - - Debug bool - logger logger.Logger - - clientOnce *sync.Once - client *http.Client - schemes []string - response ClientResponseFunc -} - -// New creates a new default runtime for a swagger api runtime.Client -func New(host, basePath string, schemes []string) *Runtime { - var rt Runtime - rt.DefaultMediaType = runtime.JSONMime - - // TODO: actually infer this stuff from the spec - rt.Consumers = map[string]runtime.Consumer{ - runtime.YAMLMime: yamlpc.YAMLConsumer(), - runtime.JSONMime: runtime.JSONConsumer(), - runtime.XMLMime: runtime.XMLConsumer(), - runtime.TextMime: runtime.TextConsumer(), - runtime.HTMLMime: runtime.TextConsumer(), - runtime.CSVMime: runtime.CSVConsumer(), - runtime.DefaultMime: runtime.ByteStreamConsumer(), - } - rt.Producers = map[string]runtime.Producer{ - runtime.YAMLMime: yamlpc.YAMLProducer(), - runtime.JSONMime: runtime.JSONProducer(), - runtime.XMLMime: runtime.XMLProducer(), - runtime.TextMime: runtime.TextProducer(), - runtime.HTMLMime: runtime.TextProducer(), - runtime.CSVMime: runtime.CSVProducer(), - runtime.DefaultMime: runtime.ByteStreamProducer(), - } - rt.Transport = http.DefaultTransport - rt.Jar = nil - rt.Host = host - rt.BasePath = basePath - rt.Context = context.Background() - rt.clientOnce = new(sync.Once) - if !strings.HasPrefix(rt.BasePath, "/") { - rt.BasePath = "/" + rt.BasePath - } - - rt.Debug = logger.DebugEnabled() - rt.logger = logger.StandardLogger{} - rt.response = newResponse - - if len(schemes) > 0 { - rt.schemes = schemes - } - return &rt -} - -// NewWithClient allows you to create a new transport with a configured http.Client -func NewWithClient(host, basePath string, schemes []string, client *http.Client) *Runtime { - rt := New(host, basePath, schemes) - if client != nil { - rt.clientOnce.Do(func() { - rt.client = client - }) - } - return rt -} - -// WithOpenTracing adds opentracing support to the provided runtime. -// A new client span is created for each request. -// If the context of the client operation does not contain an active span, no span is created. -// The provided opts are applied to each spans - for example to add global tags. -func (r *Runtime) WithOpenTracing(opts ...opentracing.StartSpanOption) runtime.ClientTransport { - return newOpenTracingTransport(r, r.Host, opts) -} - -// WithOpenTelemetry adds opentelemetry support to the provided runtime. -// A new client span is created for each request. -// If the context of the client operation does not contain an active span, no span is created. -// The provided opts are applied to each spans - for example to add global tags. 
-func (r *Runtime) WithOpenTelemetry(opts ...OpenTelemetryOpt) runtime.ClientTransport { - return newOpenTelemetryTransport(r, r.Host, opts) -} - -func (r *Runtime) pickScheme(schemes []string) string { - if v := r.selectScheme(r.schemes); v != "" { - return v - } - if v := r.selectScheme(schemes); v != "" { - return v - } - return "http" -} - -func (r *Runtime) selectScheme(schemes []string) string { - schLen := len(schemes) - if schLen == 0 { - return "" - } - - scheme := schemes[0] - // prefer https, but skip when not possible - if scheme != "https" && schLen > 1 { - for _, sch := range schemes { - if sch == "https" { - scheme = sch - break - } - } - } - return scheme -} - -func transportOrDefault(left, right http.RoundTripper) http.RoundTripper { - if left == nil { - return right - } - return left -} - -// EnableConnectionReuse drains the remaining body from a response -// so that go will reuse the TCP connections. -// -// This is not enabled by default because there are servers where -// the response never gets closed and that would make the code hang forever. -// So instead it's provided as a http client middleware that can be used to override -// any request. -func (r *Runtime) EnableConnectionReuse() { - if r.client == nil { - r.Transport = KeepAliveTransport( - transportOrDefault(r.Transport, http.DefaultTransport), - ) - return - } - - r.client.Transport = KeepAliveTransport( - transportOrDefault(r.client.Transport, - transportOrDefault(r.Transport, http.DefaultTransport), - ), - ) -} - -// takes a client operation and creates equivalent http.Request -func (r *Runtime) createHttpRequest(operation *runtime.ClientOperation) (*request, *http.Request, error) { - params, _, auth := operation.Params, operation.Reader, operation.AuthInfo - - request, err := newRequest(operation.Method, operation.PathPattern, params) - if err != nil { - return nil, nil, err - } - - var accept []string - accept = append(accept, operation.ProducesMediaTypes...) - if err = request.SetHeaderParam(runtime.HeaderAccept, accept...); err != nil { - return nil, nil, err - } - - if auth == nil && r.DefaultAuthentication != nil { - auth = runtime.ClientAuthInfoWriterFunc(func(req runtime.ClientRequest, reg strfmt.Registry) error { - if req.GetHeaderParams().Get(runtime.HeaderAuthorization) != "" { - return nil - } - return r.DefaultAuthentication.AuthenticateRequest(req, reg) - }) - } - // if auth != nil { - // if err := auth.AuthenticateRequest(request, r.Formats); err != nil { - // return nil, err - // } - //} - - // TODO: pick appropriate media type - cmt := r.DefaultMediaType - for _, mediaType := range operation.ConsumesMediaTypes { - // Pick first non-empty media type - if mediaType != "" { - cmt = mediaType - break - } - } - - if _, ok := r.Producers[cmt]; !ok && cmt != runtime.MultipartFormMime && cmt != runtime.URLencodedFormMime { - return nil, nil, fmt.Errorf("none of producers: %v registered. 
try %s", r.Producers, cmt) - } - - req, err := request.buildHTTP(cmt, r.BasePath, r.Producers, r.Formats, auth) - if err != nil { - return nil, nil, err - } - req.URL.Scheme = r.pickScheme(operation.Schemes) - req.URL.Host = r.Host - req.Host = r.Host - return request, req, nil -} - -func (r *Runtime) CreateHttpRequest(operation *runtime.ClientOperation) (req *http.Request, err error) { - _, req, err = r.createHttpRequest(operation) - return -} - -// Submit a request and when there is a body on success it will turn that into the result -// all other things are turned into an api error for swagger which retains the status code -func (r *Runtime) Submit(operation *runtime.ClientOperation) (interface{}, error) { - _, readResponse, _ := operation.Params, operation.Reader, operation.AuthInfo - - request, req, err := r.createHttpRequest(operation) - if err != nil { - return nil, err - } - - r.clientOnce.Do(func() { - r.client = &http.Client{ - Transport: r.Transport, - Jar: r.Jar, - } - }) - - if r.Debug { - b, err2 := httputil.DumpRequestOut(req, true) - if err2 != nil { - return nil, err2 - } - r.logger.Debugf("%s\n", string(b)) - } - - var hasTimeout bool - pctx := operation.Context - if pctx == nil { - pctx = r.Context - } else { - hasTimeout = true - } - if pctx == nil { - pctx = context.Background() - } - var ctx context.Context - var cancel context.CancelFunc - if hasTimeout { - ctx, cancel = context.WithCancel(pctx) - } else { - ctx, cancel = context.WithTimeout(pctx, request.timeout) - } - defer cancel() - - client := operation.Client - if client == nil { - client = r.client - } - req = req.WithContext(ctx) - res, err := client.Do(req) // make requests, by default follows 10 redirects before failing - if err != nil { - return nil, err - } - defer res.Body.Close() - - ct := res.Header.Get(runtime.HeaderContentType) - if ct == "" { // this should really really never occur - ct = r.DefaultMediaType - } - - if r.Debug { - printBody := true - if ct == runtime.DefaultMime { - printBody = false // Spare the terminal from a binary blob. - } - b, err2 := httputil.DumpResponse(res, printBody) - if err2 != nil { - return nil, err2 - } - r.logger.Debugf("%s\n", string(b)) - } - - mt, _, err := mime.ParseMediaType(ct) - if err != nil { - return nil, fmt.Errorf("parse content type: %s", err) - } - - cons, ok := r.Consumers[mt] - if !ok { - if cons, ok = r.Consumers["*/*"]; !ok { - // scream about not knowing what to do - return nil, fmt.Errorf("no consumer: %q", ct) - } - } - return readResponse.ReadResponse(r.response(res), cons) -} - -// SetDebug changes the debug flag. -// It ensures that client and middlewares have the set debug level. -func (r *Runtime) SetDebug(debug bool) { - r.Debug = debug - middleware.Debug = debug -} - -// SetLogger changes the logger stream. -// It ensures that client and middlewares use the same logger. -func (r *Runtime) SetLogger(logger logger.Logger) { - r.logger = logger - middleware.Logger = logger -} - -type ClientResponseFunc = func(*http.Response) runtime.ClientResponse - -// SetResponseReader changes the response reader implementation. 
-func (r *Runtime) SetResponseReader(f ClientResponseFunc) { - if f == nil { - return - } - r.response = f -} diff --git a/vendor/github.com/go-openapi/runtime/client_auth_info.go b/vendor/github.com/go-openapi/runtime/client_auth_info.go deleted file mode 100644 index c6c97d9a7..000000000 --- a/vendor/github.com/go-openapi/runtime/client_auth_info.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import "github.com/go-openapi/strfmt" - -// A ClientAuthInfoWriterFunc converts a function to a request writer interface -type ClientAuthInfoWriterFunc func(ClientRequest, strfmt.Registry) error - -// AuthenticateRequest adds authentication data to the request -func (fn ClientAuthInfoWriterFunc) AuthenticateRequest(req ClientRequest, reg strfmt.Registry) error { - return fn(req, reg) -} - -// A ClientAuthInfoWriter implementor knows how to write authentication info to a request -type ClientAuthInfoWriter interface { - AuthenticateRequest(ClientRequest, strfmt.Registry) error -} diff --git a/vendor/github.com/go-openapi/runtime/client_operation.go b/vendor/github.com/go-openapi/runtime/client_operation.go deleted file mode 100644 index fa21eacf3..000000000 --- a/vendor/github.com/go-openapi/runtime/client_operation.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package runtime - -import ( - "context" - "net/http" -) - -// ClientOperation represents the context for a swagger operation to be submitted to the transport -type ClientOperation struct { - ID string - Method string - PathPattern string - ProducesMediaTypes []string - ConsumesMediaTypes []string - Schemes []string - AuthInfo ClientAuthInfoWriter - Params ClientRequestWriter - Reader ClientResponseReader - Context context.Context - Client *http.Client -} - -// A ClientTransport implementor knows how to submit Request objects to some destination -type ClientTransport interface { - //Submit(string, RequestWriter, ResponseReader, AuthInfoWriter) (interface{}, error) - Submit(*ClientOperation) (interface{}, error) -} diff --git a/vendor/github.com/go-openapi/runtime/client_request.go b/vendor/github.com/go-openapi/runtime/client_request.go deleted file mode 100644 index d4d2b58f2..000000000 --- a/vendor/github.com/go-openapi/runtime/client_request.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import ( - "io" - "net/http" - "net/url" - "time" - - "github.com/go-openapi/strfmt" -) - -// ClientRequestWriterFunc converts a function to a request writer interface -type ClientRequestWriterFunc func(ClientRequest, strfmt.Registry) error - -// WriteToRequest adds data to the request -func (fn ClientRequestWriterFunc) WriteToRequest(req ClientRequest, reg strfmt.Registry) error { - return fn(req, reg) -} - -// ClientRequestWriter is an interface for things that know how to write to a request -type ClientRequestWriter interface { - WriteToRequest(ClientRequest, strfmt.Registry) error -} - -// ClientRequest is an interface for things that know how to -// add information to a swagger client request -type ClientRequest interface { - SetHeaderParam(string, ...string) error - - GetHeaderParams() http.Header - - SetQueryParam(string, ...string) error - - SetFormParam(string, ...string) error - - SetPathParam(string, string) error - - GetQueryParams() url.Values - - SetFileParam(string, ...NamedReadCloser) error - - SetBodyParam(interface{}) error - - SetTimeout(time.Duration) error - - GetMethod() string - - GetPath() string - - GetBody() []byte - - GetBodyParam() interface{} - - GetFileParam() map[string][]NamedReadCloser -} - -// NamedReadCloser represents a named ReadCloser interface -type NamedReadCloser interface { - io.ReadCloser - Name() string -} - -// NamedReader creates a NamedReadCloser for use as file upload -func NamedReader(name string, rdr io.Reader) NamedReadCloser { - rc, ok := rdr.(io.ReadCloser) - if !ok { - rc = io.NopCloser(rdr) - } - return &namedReadCloser{ - name: name, - cr: rc, - } -} - -type namedReadCloser struct { - name string - cr io.ReadCloser -} - -func (n *namedReadCloser) Close() error { - return n.cr.Close() -} -func (n *namedReadCloser) Read(p []byte) (int, error) { - return n.cr.Read(p) -} -func (n *namedReadCloser) Name() string { - return n.name -} - -type 
TestClientRequest struct { - Headers http.Header - Body interface{} -} - -func (t *TestClientRequest) SetHeaderParam(name string, values ...string) error { - if t.Headers == nil { - t.Headers = make(http.Header) - } - t.Headers.Set(name, values[0]) - return nil -} - -func (t *TestClientRequest) SetQueryParam(_ string, _ ...string) error { return nil } - -func (t *TestClientRequest) SetFormParam(_ string, _ ...string) error { return nil } - -func (t *TestClientRequest) SetPathParam(_ string, _ string) error { return nil } - -func (t *TestClientRequest) SetFileParam(_ string, _ ...NamedReadCloser) error { return nil } - -func (t *TestClientRequest) SetBodyParam(body interface{}) error { - t.Body = body - return nil -} - -func (t *TestClientRequest) SetTimeout(time.Duration) error { - return nil -} - -func (t *TestClientRequest) GetQueryParams() url.Values { return nil } - -func (t *TestClientRequest) GetMethod() string { return "" } - -func (t *TestClientRequest) GetPath() string { return "" } - -func (t *TestClientRequest) GetBody() []byte { return nil } - -func (t *TestClientRequest) GetBodyParam() interface{} { - return t.Body -} - -func (t *TestClientRequest) GetFileParam() map[string][]NamedReadCloser { - return nil -} - -func (t *TestClientRequest) GetHeaderParams() http.Header { - return t.Headers -} diff --git a/vendor/github.com/go-openapi/runtime/client_response.go b/vendor/github.com/go-openapi/runtime/client_response.go deleted file mode 100644 index 0d1691149..000000000 --- a/vendor/github.com/go-openapi/runtime/client_response.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import ( - "encoding/json" - "fmt" - "io" -) - -// A ClientResponse represents a client response -// This bridges between responses obtained from different transports -type ClientResponse interface { - Code() int - Message() string - GetHeader(string) string - GetHeaders(string) []string - Body() io.ReadCloser -} - -// A ClientResponseReaderFunc turns a function into a ClientResponseReader interface implementation -type ClientResponseReaderFunc func(ClientResponse, Consumer) (interface{}, error) - -// ReadResponse reads the response -func (read ClientResponseReaderFunc) ReadResponse(resp ClientResponse, consumer Consumer) (interface{}, error) { - return read(resp, consumer) -} - -// A ClientResponseReader is an interface for things want to read a response. 
-// An application of this is to create structs from response values -type ClientResponseReader interface { - ReadResponse(ClientResponse, Consumer) (interface{}, error) -} - -// NewAPIError creates a new API error -func NewAPIError(opName string, payload interface{}, code int) *APIError { - return &APIError{ - OperationName: opName, - Response: payload, - Code: code, - } -} - -// APIError wraps an error model and captures the status code -type APIError struct { - OperationName string - Response interface{} - Code int -} - -func (o *APIError) Error() string { - var resp []byte - if err, ok := o.Response.(error); ok { - resp = []byte("'" + err.Error() + "'") - } else { - resp, _ = json.Marshal(o.Response) - } - return fmt.Sprintf("%s (status %d): %s", o.OperationName, o.Code, resp) -} - -func (o *APIError) String() string { - return o.Error() -} - -// IsSuccess returns true when this elapse o k response returns a 2xx status code -func (o *APIError) IsSuccess() bool { - return o.Code/100 == 2 -} - -// IsRedirect returns true when this elapse o k response returns a 3xx status code -func (o *APIError) IsRedirect() bool { - return o.Code/100 == 3 -} - -// IsClientError returns true when this elapse o k response returns a 4xx status code -func (o *APIError) IsClientError() bool { - return o.Code/100 == 4 -} - -// IsServerError returns true when this elapse o k response returns a 5xx status code -func (o *APIError) IsServerError() bool { - return o.Code/100 == 5 -} - -// IsCode returns true when this elapse o k response returns a 4xx status code -func (o *APIError) IsCode(code int) bool { - return o.Code == code -} - -// A ClientResponseStatus is a common interface implemented by all responses on the generated code -// You can use this to treat any client response based on status code -type ClientResponseStatus interface { - IsSuccess() bool - IsRedirect() bool - IsClientError() bool - IsServerError() bool - IsCode(int) bool -} diff --git a/vendor/github.com/go-openapi/runtime/constants.go b/vendor/github.com/go-openapi/runtime/constants.go deleted file mode 100644 index 515969242..000000000 --- a/vendor/github.com/go-openapi/runtime/constants.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -const ( - // HeaderContentType represents a http content-type header, it's value is supposed to be a mime type - HeaderContentType = "Content-Type" - - // HeaderTransferEncoding represents a http transfer-encoding header. 
- HeaderTransferEncoding = "Transfer-Encoding" - - // HeaderAccept the Accept header - HeaderAccept = "Accept" - // HeaderAuthorization the Authorization header - HeaderAuthorization = "Authorization" - - charsetKey = "charset" - - // DefaultMime the default fallback mime type - DefaultMime = "application/octet-stream" - // JSONMime the json mime type - JSONMime = "application/json" - // YAMLMime the yaml mime type - YAMLMime = "application/x-yaml" - // XMLMime the xml mime type - XMLMime = "application/xml" - // TextMime the text mime type - TextMime = "text/plain" - // HTMLMime the html mime type - HTMLMime = "text/html" - // CSVMime the csv mime type - CSVMime = "text/csv" - // MultipartFormMime the multipart form mime type - MultipartFormMime = "multipart/form-data" - // URLencodedFormMime the url encoded form mime type - URLencodedFormMime = "application/x-www-form-urlencoded" -) diff --git a/vendor/github.com/go-openapi/runtime/csv.go b/vendor/github.com/go-openapi/runtime/csv.go deleted file mode 100644 index d807bd915..000000000 --- a/vendor/github.com/go-openapi/runtime/csv.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import ( - "bytes" - "encoding/csv" - "errors" - "io" -) - -// CSVConsumer creates a new CSV consumer -func CSVConsumer() Consumer { - return ConsumerFunc(func(reader io.Reader, data interface{}) error { - if reader == nil { - return errors.New("CSVConsumer requires a reader") - } - - csvReader := csv.NewReader(reader) - writer, ok := data.(io.Writer) - if !ok { - return errors.New("data type must be io.Writer") - } - csvWriter := csv.NewWriter(writer) - records, err := csvReader.ReadAll() - if err != nil { - return err - } - for _, r := range records { - if err := csvWriter.Write(r); err != nil { - return err - } - } - csvWriter.Flush() - return nil - }) -} - -// CSVProducer creates a new CSV producer -func CSVProducer() Producer { - return ProducerFunc(func(writer io.Writer, data interface{}) error { - if writer == nil { - return errors.New("CSVProducer requires a writer") - } - - dataBytes, ok := data.([]byte) - if !ok { - return errors.New("data type must be byte array") - } - - csvReader := csv.NewReader(bytes.NewBuffer(dataBytes)) - records, err := csvReader.ReadAll() - if err != nil { - return err - } - csvWriter := csv.NewWriter(writer) - for _, r := range records { - if err := csvWriter.Write(r); err != nil { - return err - } - } - csvWriter.Flush() - return nil - }) -} diff --git a/vendor/github.com/go-openapi/runtime/discard.go b/vendor/github.com/go-openapi/runtime/discard.go deleted file mode 100644 index 0d390cfd6..000000000 --- a/vendor/github.com/go-openapi/runtime/discard.go +++ /dev/null @@ -1,9 +0,0 @@ -package runtime - -import "io" - -// DiscardConsumer does absolutely nothing, it's a black hole. 
-var DiscardConsumer = ConsumerFunc(func(_ io.Reader, _ interface{}) error { return nil }) - -// DiscardProducer does absolutely nothing, it's a black hole. -var DiscardProducer = ProducerFunc(func(_ io.Writer, _ interface{}) error { return nil }) diff --git a/vendor/github.com/go-openapi/runtime/file.go b/vendor/github.com/go-openapi/runtime/file.go deleted file mode 100644 index 397d8a459..000000000 --- a/vendor/github.com/go-openapi/runtime/file.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import "github.com/go-openapi/swag" - -type File = swag.File diff --git a/vendor/github.com/go-openapi/runtime/headers.go b/vendor/github.com/go-openapi/runtime/headers.go deleted file mode 100644 index 4d111db4f..000000000 --- a/vendor/github.com/go-openapi/runtime/headers.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import ( - "mime" - "net/http" - - "github.com/go-openapi/errors" -) - -// ContentType parses a content type header -func ContentType(headers http.Header) (string, string, error) { - ct := headers.Get(HeaderContentType) - orig := ct - if ct == "" { - ct = DefaultMime - } - if ct == "" { - return "", "", nil - } - - mt, opts, err := mime.ParseMediaType(ct) - if err != nil { - return "", "", errors.NewParseError(HeaderContentType, "header", orig, err) - } - - if cs, ok := opts[charsetKey]; ok { - return mt, cs, nil - } - - return mt, "", nil -} diff --git a/vendor/github.com/go-openapi/runtime/interfaces.go b/vendor/github.com/go-openapi/runtime/interfaces.go deleted file mode 100644 index e33412868..000000000 --- a/vendor/github.com/go-openapi/runtime/interfaces.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package runtime - -import ( - "context" - "io" - "net/http" - - "github.com/go-openapi/strfmt" -) - -// OperationHandlerFunc an adapter for a function to the OperationHandler interface -type OperationHandlerFunc func(interface{}) (interface{}, error) - -// Handle implements the operation handler interface -func (s OperationHandlerFunc) Handle(data interface{}) (interface{}, error) { - return s(data) -} - -// OperationHandler a handler for a swagger operation -type OperationHandler interface { - Handle(interface{}) (interface{}, error) -} - -// ConsumerFunc represents a function that can be used as a consumer -type ConsumerFunc func(io.Reader, interface{}) error - -// Consume consumes the reader into the data parameter -func (fn ConsumerFunc) Consume(reader io.Reader, data interface{}) error { - return fn(reader, data) -} - -// Consumer implementations know how to bind the values on the provided interface to -// data provided by the request body -type Consumer interface { - // Consume performs the binding of request values - Consume(io.Reader, interface{}) error -} - -// ProducerFunc represents a function that can be used as a producer -type ProducerFunc func(io.Writer, interface{}) error - -// Produce produces the response for the provided data -func (f ProducerFunc) Produce(writer io.Writer, data interface{}) error { - return f(writer, data) -} - -// Producer implementations know how to turn the provided interface into a valid -// HTTP response -type Producer interface { - // Produce writes to the http response - Produce(io.Writer, interface{}) error -} - -// AuthenticatorFunc turns a function into an authenticator -type AuthenticatorFunc func(interface{}) (bool, interface{}, error) - -// Authenticate authenticates the request with the provided data -func (f AuthenticatorFunc) Authenticate(params interface{}) (bool, interface{}, error) { - return f(params) -} - -// Authenticator represents an authentication strategy -// implementations of Authenticator know how to authenticate the -// request data and translate that into a valid principal object or an error -type Authenticator interface { - Authenticate(interface{}) (bool, interface{}, error) -} - -// AuthorizerFunc turns a function into an authorizer -type AuthorizerFunc func(*http.Request, interface{}) error - -// Authorize authorizes the processing of the request for the principal -func (f AuthorizerFunc) Authorize(r *http.Request, principal interface{}) error { - return f(r, principal) -} - -// Authorizer represents an authorization strategy -// implementations of Authorizer know how to authorize the principal object -// using the request data and returns error if unauthorized -type Authorizer interface { - Authorize(*http.Request, interface{}) error -} - -// Validatable types implementing this interface allow customizing their validation -// this will be used instead of the reflective validation based on the spec document. -// the implementations are assumed to have been generated by the swagger tool so they should -// contain all the validations obtained from the spec -type Validatable interface { - Validate(strfmt.Registry) error -} - -// ContextValidatable types implementing this interface allow customizing their validation -// this will be used instead of the reflective validation based on the spec document. 
-// the implementations are assumed to have been generated by the swagger tool so they should -// contain all the context validations obtained from the spec -type ContextValidatable interface { - ContextValidate(context.Context, strfmt.Registry) error -} diff --git a/vendor/github.com/go-openapi/runtime/json.go b/vendor/github.com/go-openapi/runtime/json.go deleted file mode 100644 index 5a690559c..000000000 --- a/vendor/github.com/go-openapi/runtime/json.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import ( - "encoding/json" - "io" -) - -// JSONConsumer creates a new JSON consumer -func JSONConsumer() Consumer { - return ConsumerFunc(func(reader io.Reader, data interface{}) error { - dec := json.NewDecoder(reader) - dec.UseNumber() // preserve number formats - return dec.Decode(data) - }) -} - -// JSONProducer creates a new JSON producer -func JSONProducer() Producer { - return ProducerFunc(func(writer io.Writer, data interface{}) error { - enc := json.NewEncoder(writer) - enc.SetEscapeHTML(false) - return enc.Encode(data) - }) -} diff --git a/vendor/github.com/go-openapi/runtime/logger/logger.go b/vendor/github.com/go-openapi/runtime/logger/logger.go deleted file mode 100644 index 6f4debcc1..000000000 --- a/vendor/github.com/go-openapi/runtime/logger/logger.go +++ /dev/null @@ -1,20 +0,0 @@ -package logger - -import "os" - -type Logger interface { - Printf(format string, args ...interface{}) - Debugf(format string, args ...interface{}) -} - -func DebugEnabled() bool { - d := os.Getenv("SWAGGER_DEBUG") - if d != "" && d != "false" && d != "0" { - return true - } - d = os.Getenv("DEBUG") - if d != "" && d != "false" && d != "0" { - return true - } - return false -} diff --git a/vendor/github.com/go-openapi/runtime/logger/standard.go b/vendor/github.com/go-openapi/runtime/logger/standard.go deleted file mode 100644 index f7e67ebb9..000000000 --- a/vendor/github.com/go-openapi/runtime/logger/standard.go +++ /dev/null @@ -1,22 +0,0 @@ -package logger - -import ( - "fmt" - "os" -) - -type StandardLogger struct{} - -func (StandardLogger) Printf(format string, args ...interface{}) { - if len(format) == 0 || format[len(format)-1] != '\n' { - format += "\n" - } - fmt.Fprintf(os.Stderr, format, args...) -} - -func (StandardLogger) Debugf(format string, args ...interface{}) { - if len(format) == 0 || format[len(format)-1] != '\n' { - format += "\n" - } - fmt.Fprintf(os.Stderr, format, args...) -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/context.go b/vendor/github.com/go-openapi/runtime/middleware/context.go deleted file mode 100644 index d21ae4e87..000000000 --- a/vendor/github.com/go-openapi/runtime/middleware/context.go +++ /dev/null @@ -1,635 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package middleware - -import ( - stdContext "context" - "fmt" - "net/http" - "strings" - "sync" - - "github.com/go-openapi/analysis" - "github.com/go-openapi/errors" - "github.com/go-openapi/loads" - "github.com/go-openapi/spec" - "github.com/go-openapi/strfmt" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/runtime/logger" - "github.com/go-openapi/runtime/middleware/untyped" - "github.com/go-openapi/runtime/security" -) - -// Debug when true turns on verbose logging -var Debug = logger.DebugEnabled() -var Logger logger.Logger = logger.StandardLogger{} - -func debugLog(format string, args ...interface{}) { - if Debug { - Logger.Printf(format, args...) - } -} - -// A Builder can create middlewares -type Builder func(http.Handler) http.Handler - -// PassthroughBuilder returns the handler, aka the builder identity function -func PassthroughBuilder(handler http.Handler) http.Handler { return handler } - -// RequestBinder is an interface for types to implement -// when they want to be able to bind from a request -type RequestBinder interface { - BindRequest(*http.Request, *MatchedRoute) error -} - -// Responder is an interface for types to implement -// when they want to be considered for writing HTTP responses -type Responder interface { - WriteResponse(http.ResponseWriter, runtime.Producer) -} - -// ResponderFunc wraps a func as a Responder interface -type ResponderFunc func(http.ResponseWriter, runtime.Producer) - -// WriteResponse writes to the response -func (fn ResponderFunc) WriteResponse(rw http.ResponseWriter, pr runtime.Producer) { - fn(rw, pr) -} - -// Context is a type safe wrapper around an untyped request context -// used throughout to store request context with the standard context attached -// to the http.Request -type Context struct { - spec *loads.Document - analyzer *analysis.Spec - api RoutableAPI - router Router -} - -type routableUntypedAPI struct { - api *untyped.API - hlock *sync.Mutex - handlers map[string]map[string]http.Handler - defaultConsumes string - defaultProduces string -} - -func newRoutableUntypedAPI(spec *loads.Document, api *untyped.API, context *Context) *routableUntypedAPI { - var handlers map[string]map[string]http.Handler - if spec == nil || api == nil { - return nil - } - analyzer := analysis.New(spec.Spec()) - for method, hls := range analyzer.Operations() { - um := strings.ToUpper(method) - for path, op := range hls { - schemes := analyzer.SecurityRequirementsFor(op) - - if oh, ok := api.OperationHandlerFor(method, path); ok { - if handlers == nil { - handlers = make(map[string]map[string]http.Handler) - } - if b, ok := handlers[um]; !ok || b == nil { - handlers[um] = make(map[string]http.Handler) - } - - var handler http.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // lookup route info in the context - route, rCtx, _ := context.RouteInfo(r) - if rCtx != nil { - r = rCtx - } - - // bind and validate the request using reflection - var bound interface{} - var validation error - bound, r, validation = context.BindAndValidate(r, route) - if validation != nil { - context.Respond(w, r, 
route.Produces, route, validation) - return - } - - // actually handle the request - result, err := oh.Handle(bound) - if err != nil { - // respond with failure - context.Respond(w, r, route.Produces, route, err) - return - } - - // respond with success - context.Respond(w, r, route.Produces, route, result) - }) - - if len(schemes) > 0 { - handler = newSecureAPI(context, handler) - } - handlers[um][path] = handler - } - } - } - - return &routableUntypedAPI{ - api: api, - hlock: new(sync.Mutex), - handlers: handlers, - defaultProduces: api.DefaultProduces, - defaultConsumes: api.DefaultConsumes, - } -} - -func (r *routableUntypedAPI) HandlerFor(method, path string) (http.Handler, bool) { - r.hlock.Lock() - paths, ok := r.handlers[strings.ToUpper(method)] - if !ok { - r.hlock.Unlock() - return nil, false - } - handler, ok := paths[path] - r.hlock.Unlock() - return handler, ok -} -func (r *routableUntypedAPI) ServeErrorFor(operationID string) func(http.ResponseWriter, *http.Request, error) { - return r.api.ServeError -} -func (r *routableUntypedAPI) ConsumersFor(mediaTypes []string) map[string]runtime.Consumer { - return r.api.ConsumersFor(mediaTypes) -} -func (r *routableUntypedAPI) ProducersFor(mediaTypes []string) map[string]runtime.Producer { - return r.api.ProducersFor(mediaTypes) -} -func (r *routableUntypedAPI) AuthenticatorsFor(schemes map[string]spec.SecurityScheme) map[string]runtime.Authenticator { - return r.api.AuthenticatorsFor(schemes) -} -func (r *routableUntypedAPI) Authorizer() runtime.Authorizer { - return r.api.Authorizer() -} -func (r *routableUntypedAPI) Formats() strfmt.Registry { - return r.api.Formats() -} - -func (r *routableUntypedAPI) DefaultProduces() string { - return r.defaultProduces -} - -func (r *routableUntypedAPI) DefaultConsumes() string { - return r.defaultConsumes -} - -// NewRoutableContext creates a new context for a routable API -func NewRoutableContext(spec *loads.Document, routableAPI RoutableAPI, routes Router) *Context { - var an *analysis.Spec - if spec != nil { - an = analysis.New(spec.Spec()) - } - - return NewRoutableContextWithAnalyzedSpec(spec, an, routableAPI, routes) -} - -// NewRoutableContextWithAnalyzedSpec is like NewRoutableContext but takes in input the analysed spec too -func NewRoutableContextWithAnalyzedSpec(spec *loads.Document, an *analysis.Spec, routableAPI RoutableAPI, routes Router) *Context { - // Either there are no spec doc and analysis, or both of them. 
- if !((spec == nil && an == nil) || (spec != nil && an != nil)) { - panic(errors.New(http.StatusInternalServerError, "routable context requires either both spec doc and analysis, or none of them")) - } - - ctx := &Context{spec: spec, api: routableAPI, analyzer: an, router: routes} - return ctx -} - -// NewContext creates a new context wrapper -func NewContext(spec *loads.Document, api *untyped.API, routes Router) *Context { - var an *analysis.Spec - if spec != nil { - an = analysis.New(spec.Spec()) - } - ctx := &Context{spec: spec, analyzer: an} - ctx.api = newRoutableUntypedAPI(spec, api, ctx) - ctx.router = routes - return ctx -} - -// Serve serves the specified spec with the specified api registrations as a http.Handler -func Serve(spec *loads.Document, api *untyped.API) http.Handler { - return ServeWithBuilder(spec, api, PassthroughBuilder) -} - -// ServeWithBuilder serves the specified spec with the specified api registrations as a http.Handler that is decorated -// by the Builder -func ServeWithBuilder(spec *loads.Document, api *untyped.API, builder Builder) http.Handler { - context := NewContext(spec, api, nil) - return context.APIHandler(builder) -} - -type contextKey int8 - -const ( - _ contextKey = iota - ctxContentType - ctxResponseFormat - ctxMatchedRoute - ctxBoundParams - ctxSecurityPrincipal - ctxSecurityScopes -) - -// MatchedRouteFrom request context value. -func MatchedRouteFrom(req *http.Request) *MatchedRoute { - mr := req.Context().Value(ctxMatchedRoute) - if mr == nil { - return nil - } - if res, ok := mr.(*MatchedRoute); ok { - return res - } - return nil -} - -// SecurityPrincipalFrom request context value. -func SecurityPrincipalFrom(req *http.Request) interface{} { - return req.Context().Value(ctxSecurityPrincipal) -} - -// SecurityScopesFrom request context value. -func SecurityScopesFrom(req *http.Request) []string { - rs := req.Context().Value(ctxSecurityScopes) - if res, ok := rs.([]string); ok { - return res - } - return nil -} - -type contentTypeValue struct { - MediaType string - Charset string -} - -// BasePath returns the base path for this API -func (c *Context) BasePath() string { - return c.spec.BasePath() -} - -// RequiredProduces returns the accepted content types for responses -func (c *Context) RequiredProduces() []string { - return c.analyzer.RequiredProduces() -} - -// BindValidRequest binds a params object to a request but only when the request is valid -// if the request is not valid an error will be returned -func (c *Context) BindValidRequest(request *http.Request, route *MatchedRoute, binder RequestBinder) error { - var res []error - var requestContentType string - - // check and validate content type, select consumer - if runtime.HasBody(request) { - ct, _, err := runtime.ContentType(request.Header) - if err != nil { - res = append(res, err) - } else { - if err := validateContentType(route.Consumes, ct); err != nil { - res = append(res, err) - } - if len(res) == 0 { - cons, ok := route.Consumers[ct] - if !ok { - res = append(res, errors.New(500, "no consumer registered for %s", ct)) - } else { - route.Consumer = cons - requestContentType = ct - } - } - } - } - - // check and validate the response format - if len(res) == 0 { - // if the route does not provide Produces and a default contentType could not be identified - // based on a body, typical for GET and DELETE requests, then default contentType to. 
- if len(route.Produces) == 0 && requestContentType == "" { - requestContentType = "*/*" - } - - if str := NegotiateContentType(request, route.Produces, requestContentType); str == "" { - res = append(res, errors.InvalidResponseFormat(request.Header.Get(runtime.HeaderAccept), route.Produces)) - } - } - - // now bind the request with the provided binder - // it's assumed the binder will also validate the request and return an error if the - // request is invalid - if binder != nil && len(res) == 0 { - if err := binder.BindRequest(request, route); err != nil { - return err - } - } - - if len(res) > 0 { - return errors.CompositeValidationError(res...) - } - return nil -} - -// ContentType gets the parsed value of a content type -// Returns the media type, its charset and a shallow copy of the request -// when its context doesn't contain the content type value, otherwise it returns -// the same request -// Returns the error that runtime.ContentType may retunrs. -func (c *Context) ContentType(request *http.Request) (string, string, *http.Request, error) { - var rCtx = request.Context() - - if v, ok := rCtx.Value(ctxContentType).(*contentTypeValue); ok { - return v.MediaType, v.Charset, request, nil - } - - mt, cs, err := runtime.ContentType(request.Header) - if err != nil { - return "", "", nil, err - } - rCtx = stdContext.WithValue(rCtx, ctxContentType, &contentTypeValue{mt, cs}) - return mt, cs, request.WithContext(rCtx), nil -} - -// LookupRoute looks a route up and returns true when it is found -func (c *Context) LookupRoute(request *http.Request) (*MatchedRoute, bool) { - if route, ok := c.router.Lookup(request.Method, request.URL.EscapedPath()); ok { - return route, ok - } - return nil, false -} - -// RouteInfo tries to match a route for this request -// Returns the matched route, a shallow copy of the request if its context -// contains the matched router, otherwise the same request, and a bool to -// indicate if it the request matches one of the routes, if it doesn't -// then it returns false and nil for the other two return values -func (c *Context) RouteInfo(request *http.Request) (*MatchedRoute, *http.Request, bool) { - var rCtx = request.Context() - - if v, ok := rCtx.Value(ctxMatchedRoute).(*MatchedRoute); ok { - return v, request, ok - } - - if route, ok := c.LookupRoute(request); ok { - rCtx = stdContext.WithValue(rCtx, ctxMatchedRoute, route) - return route, request.WithContext(rCtx), ok - } - - return nil, nil, false -} - -// ResponseFormat negotiates the response content type -// Returns the response format and a shallow copy of the request if its context -// doesn't contain the response format, otherwise the same request -func (c *Context) ResponseFormat(r *http.Request, offers []string) (string, *http.Request) { - var rCtx = r.Context() - - if v, ok := rCtx.Value(ctxResponseFormat).(string); ok { - debugLog("[%s %s] found response format %q in context", r.Method, r.URL.Path, v) - return v, r - } - - format := NegotiateContentType(r, offers, "") - if format != "" { - debugLog("[%s %s] set response format %q in context", r.Method, r.URL.Path, format) - r = r.WithContext(stdContext.WithValue(rCtx, ctxResponseFormat, format)) - } - debugLog("[%s %s] negotiated response format %q", r.Method, r.URL.Path, format) - return format, r -} - -// AllowedMethods gets the allowed methods for the path of this request -func (c *Context) AllowedMethods(request *http.Request) []string { - return c.router.OtherMethods(request.Method, request.URL.EscapedPath()) -} - -// ResetAuth removes the 
current principal from the request context -func (c *Context) ResetAuth(request *http.Request) *http.Request { - rctx := request.Context() - rctx = stdContext.WithValue(rctx, ctxSecurityPrincipal, nil) - rctx = stdContext.WithValue(rctx, ctxSecurityScopes, nil) - return request.WithContext(rctx) -} - -// Authorize authorizes the request -// Returns the principal object and a shallow copy of the request when its -// context doesn't contain the principal, otherwise the same request or an error -// (the last) if one of the authenticators returns one or an Unauthenticated error -func (c *Context) Authorize(request *http.Request, route *MatchedRoute) (interface{}, *http.Request, error) { - if route == nil || !route.HasAuth() { - return nil, nil, nil - } - - var rCtx = request.Context() - if v := rCtx.Value(ctxSecurityPrincipal); v != nil { - return v, request, nil - } - - applies, usr, err := route.Authenticators.Authenticate(request, route) - if !applies || err != nil || !route.Authenticators.AllowsAnonymous() && usr == nil { - if err != nil { - return nil, nil, err - } - return nil, nil, errors.Unauthenticated("invalid credentials") - } - if route.Authorizer != nil { - if err := route.Authorizer.Authorize(request, usr); err != nil { - if _, ok := err.(errors.Error); ok { - return nil, nil, err - } - - return nil, nil, errors.New(http.StatusForbidden, err.Error()) - } - } - - rCtx = request.Context() - - rCtx = stdContext.WithValue(rCtx, ctxSecurityPrincipal, usr) - rCtx = stdContext.WithValue(rCtx, ctxSecurityScopes, route.Authenticator.AllScopes()) - return usr, request.WithContext(rCtx), nil -} - -// BindAndValidate binds and validates the request -// Returns the validation map and a shallow copy of the request when its context -// doesn't contain the validation, otherwise it returns the same request or an -// CompositeValidationError error -func (c *Context) BindAndValidate(request *http.Request, matched *MatchedRoute) (interface{}, *http.Request, error) { - var rCtx = request.Context() - - if v, ok := rCtx.Value(ctxBoundParams).(*validation); ok { - debugLog("got cached validation (valid: %t)", len(v.result) == 0) - if len(v.result) > 0 { - return v.bound, request, errors.CompositeValidationError(v.result...) - } - return v.bound, request, nil - } - result := validateRequest(c, request, matched) - rCtx = stdContext.WithValue(rCtx, ctxBoundParams, result) - request = request.WithContext(rCtx) - if len(result.result) > 0 { - return result.bound, request, errors.CompositeValidationError(result.result...) 
- } - debugLog("no validation errors found") - return result.bound, request, nil -} - -// NotFound the default not found responder for when no route has been matched yet -func (c *Context) NotFound(rw http.ResponseWriter, r *http.Request) { - c.Respond(rw, r, []string{c.api.DefaultProduces()}, nil, errors.NotFound("not found")) -} - -// Respond renders the response after doing some content negotiation -func (c *Context) Respond(rw http.ResponseWriter, r *http.Request, produces []string, route *MatchedRoute, data interface{}) { - debugLog("responding to %s %s with produces: %v", r.Method, r.URL.Path, produces) - offers := []string{} - for _, mt := range produces { - if mt != c.api.DefaultProduces() { - offers = append(offers, mt) - } - } - // the default producer is last so more specific producers take precedence - offers = append(offers, c.api.DefaultProduces()) - debugLog("offers: %v", offers) - - var format string - format, r = c.ResponseFormat(r, offers) - rw.Header().Set(runtime.HeaderContentType, format) - - if resp, ok := data.(Responder); ok { - producers := route.Producers - // producers contains keys with normalized format, if a format has MIME type parameter such as `text/plain; charset=utf-8` - // then you must provide `text/plain` to get the correct producer. HOWEVER, format here is not normalized. - prod, ok := producers[normalizeOffer(format)] - if !ok { - prods := c.api.ProducersFor(normalizeOffers([]string{c.api.DefaultProduces()})) - pr, ok := prods[c.api.DefaultProduces()] - if !ok { - panic(errors.New(http.StatusInternalServerError, "can't find a producer for "+format)) - } - prod = pr - } - resp.WriteResponse(rw, prod) - return - } - - if err, ok := data.(error); ok { - if format == "" { - rw.Header().Set(runtime.HeaderContentType, runtime.JSONMime) - } - - if realm := security.FailedBasicAuth(r); realm != "" { - rw.Header().Set("WWW-Authenticate", fmt.Sprintf("Basic realm=%q", realm)) - } - - if route == nil || route.Operation == nil { - c.api.ServeErrorFor("")(rw, r, err) - return - } - c.api.ServeErrorFor(route.Operation.ID)(rw, r, err) - return - } - - if route == nil || route.Operation == nil { - rw.WriteHeader(200) - if r.Method == "HEAD" { - return - } - producers := c.api.ProducersFor(normalizeOffers(offers)) - prod, ok := producers[format] - if !ok { - panic(errors.New(http.StatusInternalServerError, "can't find a producer for "+format)) - } - if err := prod.Produce(rw, data); err != nil { - panic(err) // let the recovery middleware deal with this - } - return - } - - if _, code, ok := route.Operation.SuccessResponse(); ok { - rw.WriteHeader(code) - if code == 204 || r.Method == "HEAD" { - return - } - - producers := route.Producers - prod, ok := producers[format] - if !ok { - if !ok { - prods := c.api.ProducersFor(normalizeOffers([]string{c.api.DefaultProduces()})) - pr, ok := prods[c.api.DefaultProduces()] - if !ok { - panic(errors.New(http.StatusInternalServerError, "can't find a producer for "+format)) - } - prod = pr - } - } - if err := prod.Produce(rw, data); err != nil { - panic(err) // let the recovery middleware deal with this - } - return - } - - c.api.ServeErrorFor(route.Operation.ID)(rw, r, errors.New(http.StatusInternalServerError, "can't produce response")) -} - -func (c *Context) APIHandlerSwaggerUI(builder Builder) http.Handler { - b := builder - if b == nil { - b = PassthroughBuilder - } - - var title string - sp := c.spec.Spec() - if sp != nil && sp.Info != nil && sp.Info.Title != "" { - title = sp.Info.Title - } - - swaggerUIOpts := 
SwaggerUIOpts{ - BasePath: c.BasePath(), - Title: title, - } - - return Spec("", c.spec.Raw(), SwaggerUI(swaggerUIOpts, c.RoutesHandler(b))) -} - -// APIHandler returns a handler to serve the API, this includes a swagger spec, router and the contract defined in the swagger spec -func (c *Context) APIHandler(builder Builder) http.Handler { - b := builder - if b == nil { - b = PassthroughBuilder - } - - var title string - sp := c.spec.Spec() - if sp != nil && sp.Info != nil && sp.Info.Title != "" { - title = sp.Info.Title - } - - redocOpts := RedocOpts{ - BasePath: c.BasePath(), - Title: title, - } - - return Spec("", c.spec.Raw(), Redoc(redocOpts, c.RoutesHandler(b))) -} - -// RoutesHandler returns a handler to serve the API, just the routes and the contract defined in the swagger spec -func (c *Context) RoutesHandler(builder Builder) http.Handler { - b := builder - if b == nil { - b = PassthroughBuilder - } - return NewRouter(c, b(NewOperationExecutor(c))) -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/denco/LICENSE b/vendor/github.com/go-openapi/runtime/middleware/denco/LICENSE deleted file mode 100644 index e65039ad8..000000000 --- a/vendor/github.com/go-openapi/runtime/middleware/denco/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2014 Naoya Inada - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/go-openapi/runtime/middleware/denco/README.md b/vendor/github.com/go-openapi/runtime/middleware/denco/README.md deleted file mode 100644 index 30109e17d..000000000 --- a/vendor/github.com/go-openapi/runtime/middleware/denco/README.md +++ /dev/null @@ -1,180 +0,0 @@ -# Denco [![Build Status](https://travis-ci.org/naoina/denco.png?branch=master)](https://travis-ci.org/naoina/denco) - -The fast and flexible HTTP request router for [Go](http://golang.org). - -Denco is based on Double-Array implementation of [Kocha-urlrouter](https://github.com/naoina/kocha-urlrouter). -However, Denco is optimized and some features added. 
- -## Features - -* Fast (See [go-http-routing-benchmark](https://github.com/naoina/go-http-routing-benchmark)) -* [URL patterns](#url-patterns) (`/foo/:bar` and `/foo/*wildcard`) -* Small (but enough) URL router API -* HTTP request multiplexer like `http.ServeMux` - -## Installation - - go get -u github.com/go-openapi/runtime/middleware/denco - -## Using as HTTP request multiplexer - -```go -package main - -import ( - "fmt" - "log" - "net/http" - - "github.com/go-openapi/runtime/middleware/denco" -) - -func Index(w http.ResponseWriter, r *http.Request, params denco.Params) { - fmt.Fprintf(w, "Welcome to Denco!\n") -} - -func User(w http.ResponseWriter, r *http.Request, params denco.Params) { - fmt.Fprintf(w, "Hello %s!\n", params.Get("name")) -} - -func main() { - mux := denco.NewMux() - handler, err := mux.Build([]denco.Handler{ - mux.GET("/", Index), - mux.GET("/user/:name", User), - mux.POST("/user/:name", User), - }) - if err != nil { - panic(err) - } - log.Fatal(http.ListenAndServe(":8080", handler)) -} -``` - -## Using as URL router - -```go -package main - -import ( - "fmt" - - "github.com/go-openapi/runtime/middleware/denco" -) - -type route struct { - name string -} - -func main() { - router := denco.New() - router.Build([]denco.Record{ - {"/", &route{"root"}}, - {"/user/:id", &route{"user"}}, - {"/user/:name/:id", &route{"username"}}, - {"/static/*filepath", &route{"static"}}, - }) - - data, params, found := router.Lookup("/") - // print `&main.route{name:"root"}, denco.Params(nil), true`. - fmt.Printf("%#v, %#v, %#v\n", data, params, found) - - data, params, found = router.Lookup("/user/hoge") - // print `&main.route{name:"user"}, denco.Params{denco.Param{Name:"id", Value:"hoge"}}, true`. - fmt.Printf("%#v, %#v, %#v\n", data, params, found) - - data, params, found = router.Lookup("/user/hoge/7") - // print `&main.route{name:"username"}, denco.Params{denco.Param{Name:"name", Value:"hoge"}, denco.Param{Name:"id", Value:"7"}}, true`. - fmt.Printf("%#v, %#v, %#v\n", data, params, found) - - data, params, found = router.Lookup("/static/path/to/file") - // print `&main.route{name:"static"}, denco.Params{denco.Param{Name:"filepath", Value:"path/to/file"}}, true`. - fmt.Printf("%#v, %#v, %#v\n", data, params, found) -} -``` - -See [Godoc](http://godoc.org/github.com/go-openapi/runtime/middleware/denco) for more details. - -## Getting the value of path parameter - -You can get the value of path parameter by 2 ways. - -1. Using [`denco.Params.Get`](http://godoc.org/github.com/go-openapi/runtime/middleware/denco#Params.Get) method -2. Find by loop - -```go -package main - -import ( - "fmt" - - "github.com/go-openapi/runtime/middleware/denco" -) - -func main() { - router := denco.New() - if err := router.Build([]denco.Record{ - {"/user/:name/:id", "route1"}, - }); err != nil { - panic(err) - } - - // 1. Using denco.Params.Get method. - _, params, _ := router.Lookup("/user/alice/1") - name := params.Get("name") - if name != "" { - fmt.Printf("Hello %s.\n", name) // prints "Hello alice.". - } - - // 2. Find by loop. - for _, param := range params { - if param.Name == "name" { - fmt.Printf("Hello %s.\n", name) // prints "Hello alice.". - } - } -} -``` - -## URL patterns - -Denco's route matching strategy is "most nearly matching". - -When routes `/:name` and `/alice` have been built, URI `/alice` matches the route `/alice`, not `/:name`. -Because URI `/alice` is more match with the route `/alice` than `/:name`. 
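The "most nearly matching" rule described above can be exercised directly against the `Router` API defined in the `router.go` that is removed further down in this diff. The snippet below is an editorial sketch added for this review, not part of the deleted README, and it assumes the upstream `github.com/go-openapi/runtime/middleware/denco` package remains importable even though the vendored copy is being dropped here.

```go
package main

import (
	"fmt"

	"github.com/go-openapi/runtime/middleware/denco"
)

func main() {
	router := denco.New()
	// Overlapping routes: the static "/alice" must win over the parameterized "/:name".
	if err := router.Build([]denco.Record{
		{Key: "/:name", Value: "param route"},
		{Key: "/alice", Value: "static route"},
	}); err != nil {
		panic(err)
	}

	data, _, _ := router.Lookup("/alice")
	fmt.Println(data) // "static route", not "param route"

	data, params, _ := router.Lookup("/bob")
	fmt.Println(data, params.Get("name")) // "param route bob"
}
```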
- -For more example, when routes below have been built: - -``` -/user/alice -/user/:name -/user/:name/:id -/user/alice/:id -/user/:id/bob -``` - -Routes matching are: - -``` -/user/alice => "/user/alice" (no match with "/user/:name") -/user/bob => "/user/:name" -/user/naoina/1 => "/user/:name/1" -/user/alice/1 => "/user/alice/:id" (no match with "/user/:name/:id") -/user/1/bob => "/user/:id/bob" (no match with "/user/:name/:id") -/user/alice/bob => "/user/alice/:id" (no match with "/user/:name/:id" and "/user/:id/bob") -``` - -## Limitation - -Denco has some limitations below. - -* Number of param records (such as `/:name`) must be less than 2^22 -* Number of elements of internal slice must be less than 2^22 - -## Benchmarks - - cd $GOPATH/github.com/go-openapi/runtime/middleware/denco - go test -bench . -benchmem - -## License - -Denco is licensed under the MIT License. diff --git a/vendor/github.com/go-openapi/runtime/middleware/denco/router.go b/vendor/github.com/go-openapi/runtime/middleware/denco/router.go deleted file mode 100644 index 5d2691ec3..000000000 --- a/vendor/github.com/go-openapi/runtime/middleware/denco/router.go +++ /dev/null @@ -1,460 +0,0 @@ -// Package denco provides fast URL router. -package denco - -import ( - "fmt" - "sort" - "strings" -) - -const ( - // ParamCharacter is a special character for path parameter. - ParamCharacter = ':' - - // WildcardCharacter is a special character for wildcard path parameter. - WildcardCharacter = '*' - - // TerminationCharacter is a special character for end of path. - TerminationCharacter = '#' - - // SeparatorCharacter separates path segments. - SeparatorCharacter = '/' - - // PathParamCharacter indicates a RESTCONF path param - PathParamCharacter = '=' - - // MaxSize is max size of records and internal slice. - MaxSize = (1 << 22) - 1 -) - -// Router represents a URL router. -type Router struct { - // SizeHint expects the maximum number of path parameters in records to Build. - // SizeHint will be used to determine the capacity of the memory to allocate. - // By default, SizeHint will be determined from given records to Build. - SizeHint int - - static map[string]interface{} - param *doubleArray -} - -// New returns a new Router. -func New() *Router { - return &Router{ - SizeHint: -1, - static: make(map[string]interface{}), - param: newDoubleArray(), - } -} - -// Lookup returns data and path parameters that associated with path. -// params is a slice of the Param that arranged in the order in which parameters appeared. -// e.g. when built routing path is "/path/to/:id/:name" and given path is "/path/to/1/alice". params order is [{"id": "1"}, {"name": "alice"}], not [{"name": "alice"}, {"id": "1"}]. -func (rt *Router) Lookup(path string) (data interface{}, params Params, found bool) { - if data, found := rt.static[path]; found { - return data, nil, true - } - if len(rt.param.node) == 1 { - return nil, nil, false - } - nd, params, found := rt.param.lookup(path, make([]Param, 0, rt.SizeHint), 1) - if !found { - return nil, nil, false - } - for i := 0; i < len(params); i++ { - params[i].Name = nd.paramNames[i] - } - return nd.data, params, true -} - -// Build builds URL router from records. 
-func (rt *Router) Build(records []Record) error {
-	statics, params := makeRecords(records)
-	if len(params) > MaxSize {
-		return fmt.Errorf("denco: too many records")
-	}
-	if rt.SizeHint < 0 {
-		rt.SizeHint = 0
-		for _, p := range params {
-			size := 0
-			for _, k := range p.Key {
-				if k == ParamCharacter || k == WildcardCharacter {
-					size++
-				}
-			}
-			if size > rt.SizeHint {
-				rt.SizeHint = size
-			}
-		}
-	}
-	for _, r := range statics {
-		rt.static[r.Key] = r.Value
-	}
-	if err := rt.param.build(params, 1, 0, make(map[int]struct{})); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Param represents name and value of path parameter.
-type Param struct {
-	Name  string
-	Value string
-}
-
-// Params represents the name and value of path parameters.
-type Params []Param
-
-// Get gets the first value associated with the given name.
-// If there are no values associated with the key, Get returns "".
-func (ps Params) Get(name string) string {
-	for _, p := range ps {
-		if p.Name == name {
-			return p.Value
-		}
-	}
-	return ""
-}
-
-type doubleArray struct {
-	bc   []baseCheck
-	node []*node
-}
-
-func newDoubleArray() *doubleArray {
-	return &doubleArray{
-		bc:   []baseCheck{0},
-		node: []*node{nil}, // A start index is adjusting to 1 because 0 will be used as a mark of non-existent node.
-	}
-}
-
-// baseCheck contains BASE, CHECK and Extra flags.
-// From the top, 22bits of BASE, 2bits of Extra flags and 8bits of CHECK.
-//
-// BASE (22bit) | Extra flags (2bit) | CHECK (8bit)
-// |----------------------|--|--------|
-// 32                    10  8        0
-type baseCheck uint32
-
-func (bc baseCheck) Base() int {
-	return int(bc >> 10)
-}
-
-func (bc *baseCheck) SetBase(base int) {
-	*bc |= baseCheck(base) << 10
-}
-
-func (bc baseCheck) Check() byte {
-	return byte(bc)
-}
-
-func (bc *baseCheck) SetCheck(check byte) {
-	*bc |= baseCheck(check)
-}
-
-func (bc baseCheck) IsEmpty() bool {
-	return bc&0xfffffcff == 0
-}
-
-func (bc baseCheck) IsSingleParam() bool {
-	return bc&paramTypeSingle == paramTypeSingle
-}
-
-func (bc baseCheck) IsWildcardParam() bool {
-	return bc&paramTypeWildcard == paramTypeWildcard
-}
-
-func (bc baseCheck) IsAnyParam() bool {
-	return bc&paramTypeAny != 0
-}
-
-func (bc *baseCheck) SetSingleParam() {
-	*bc |= (1 << 8)
-}
-
-func (bc *baseCheck) SetWildcardParam() {
-	*bc |= (1 << 9)
-}
-
-const (
-	paramTypeSingle   = 0x0100
-	paramTypeWildcard = 0x0200
-	paramTypeAny      = 0x0300
-)
-
-func (da *doubleArray) lookup(path string, params []Param, idx int) (*node, []Param, bool) {
-	indices := make([]uint64, 0, 1)
-	for i := 0; i < len(path); i++ {
-		if da.bc[idx].IsAnyParam() {
-			indices = append(indices, (uint64(i)<<32)|(uint64(idx)&0xffffffff))
-		}
-		c := path[i]
-		if idx = nextIndex(da.bc[idx].Base(), c); idx >= len(da.bc) || da.bc[idx].Check() != c {
-			goto BACKTRACKING
-		}
-	}
-	if next := nextIndex(da.bc[idx].Base(), TerminationCharacter); next < len(da.bc) && da.bc[next].Check() == TerminationCharacter {
-		return da.node[da.bc[next].Base()], params, true
-	}
-BACKTRACKING:
-	for j := len(indices) - 1; j >= 0; j-- {
-		i, idx := int(indices[j]>>32), int(indices[j]&0xffffffff)
-		if da.bc[idx].IsSingleParam() {
-			idx := nextIndex(da.bc[idx].Base(), ParamCharacter)
-			if idx >= len(da.bc) {
-				break
-			}
-			next := NextSeparator(path, i)
-			params := append(params, Param{Value: path[i:next]})
-			if nd, params, found := da.lookup(path[next:], params, idx); found {
-				return nd, params, true
-			}
-		}
-		if da.bc[idx].IsWildcardParam() {
-			idx := nextIndex(da.bc[idx].Base(), WildcardCharacter)
-			params := append(params,
Param{Value: path[i:]}) - return da.node[da.bc[idx].Base()], params, true - } - } - return nil, nil, false -} - -// build builds double-array from records. -func (da *doubleArray) build(srcs []*record, idx, depth int, usedBase map[int]struct{}) error { - sort.Stable(recordSlice(srcs)) - base, siblings, leaf, err := da.arrange(srcs, idx, depth, usedBase) - if err != nil { - return err - } - if leaf != nil { - nd, err := makeNode(leaf) - if err != nil { - return err - } - da.bc[idx].SetBase(len(da.node)) - da.node = append(da.node, nd) - } - for _, sib := range siblings { - da.setCheck(nextIndex(base, sib.c), sib.c) - } - for _, sib := range siblings { - records := srcs[sib.start:sib.end] - switch sib.c { - case ParamCharacter: - for _, r := range records { - next := NextSeparator(r.Key, depth+1) - name := r.Key[depth+1 : next] - r.paramNames = append(r.paramNames, name) - r.Key = r.Key[next:] - } - da.bc[idx].SetSingleParam() - if err := da.build(records, nextIndex(base, sib.c), 0, usedBase); err != nil { - return err - } - case WildcardCharacter: - r := records[0] - name := r.Key[depth+1 : len(r.Key)-1] - r.paramNames = append(r.paramNames, name) - r.Key = "" - da.bc[idx].SetWildcardParam() - if err := da.build(records, nextIndex(base, sib.c), 0, usedBase); err != nil { - return err - } - default: - if err := da.build(records, nextIndex(base, sib.c), depth+1, usedBase); err != nil { - return err - } - } - } - return nil -} - -// setBase sets BASE. -func (da *doubleArray) setBase(i, base int) { - da.bc[i].SetBase(base) -} - -// setCheck sets CHECK. -func (da *doubleArray) setCheck(i int, check byte) { - da.bc[i].SetCheck(check) -} - -// findEmptyIndex returns an index of unused BASE/CHECK node. -func (da *doubleArray) findEmptyIndex(start int) int { - i := start - for ; i < len(da.bc); i++ { - if da.bc[i].IsEmpty() { - break - } - } - return i -} - -// findBase returns good BASE. -func (da *doubleArray) findBase(siblings []sibling, start int, usedBase map[int]struct{}) (base int) { - for idx, firstChar := start+1, siblings[0].c; ; idx = da.findEmptyIndex(idx + 1) { - base = nextIndex(idx, firstChar) - if _, used := usedBase[base]; used { - continue - } - i := 0 - for ; i < len(siblings); i++ { - next := nextIndex(base, siblings[i].c) - if len(da.bc) <= next { - da.bc = append(da.bc, make([]baseCheck, next-len(da.bc)+1)...) - } - if !da.bc[next].IsEmpty() { - break - } - } - if i == len(siblings) { - break - } - } - usedBase[base] = struct{}{} - return base -} - -func (da *doubleArray) arrange(records []*record, idx, depth int, usedBase map[int]struct{}) (base int, siblings []sibling, leaf *record, err error) { - siblings, leaf, err = makeSiblings(records, depth) - if err != nil { - return -1, nil, nil, err - } - if len(siblings) < 1 { - return -1, nil, leaf, nil - } - base = da.findBase(siblings, idx, usedBase) - if base > MaxSize { - return -1, nil, nil, fmt.Errorf("denco: too many elements of internal slice") - } - da.setBase(idx, base) - return base, siblings, leaf, err -} - -// node represents a node of Double-Array. -type node struct { - data interface{} - - // Names of path parameters. - paramNames []string -} - -// makeNode returns a new node from record. 
-func makeNode(r *record) (*node, error) { - dups := make(map[string]bool) - for _, name := range r.paramNames { - if dups[name] { - return nil, fmt.Errorf("denco: path parameter `%v' is duplicated in the key `%v'", name, r.Key) - } - dups[name] = true - } - return &node{data: r.Value, paramNames: r.paramNames}, nil -} - -// sibling represents an intermediate data of build for Double-Array. -type sibling struct { - // An index of start of duplicated characters. - start int - - // An index of end of duplicated characters. - end int - - // A character of sibling. - c byte -} - -// nextIndex returns a next index of array of BASE/CHECK. -func nextIndex(base int, c byte) int { - return base ^ int(c) -} - -// makeSiblings returns slice of sibling. -func makeSiblings(records []*record, depth int) (sib []sibling, leaf *record, err error) { - var ( - pc byte - n int - ) - for i, r := range records { - if len(r.Key) <= depth { - leaf = r - continue - } - c := r.Key[depth] - switch { - case pc < c: - sib = append(sib, sibling{start: i, c: c}) - case pc == c: - continue - default: - return nil, nil, fmt.Errorf("denco: BUG: routing table hasn't been sorted") - } - if n > 0 { - sib[n-1].end = i - } - pc = c - n++ - } - if n == 0 { - return nil, leaf, nil - } - sib[n-1].end = len(records) - return sib, leaf, nil -} - -// Record represents a record data for router construction. -type Record struct { - // Key for router construction. - Key string - - // Result value for Key. - Value interface{} -} - -// NewRecord returns a new Record. -func NewRecord(key string, value interface{}) Record { - return Record{ - Key: key, - Value: value, - } -} - -// record represents a record that use to build the Double-Array. -type record struct { - Record - paramNames []string -} - -// makeRecords returns the records that use to build Double-Arrays. -func makeRecords(srcs []Record) (statics, params []*record) { - termChar := string(TerminationCharacter) - paramPrefix := string(SeparatorCharacter) + string(ParamCharacter) - wildcardPrefix := string(SeparatorCharacter) + string(WildcardCharacter) - restconfPrefix := string(PathParamCharacter) + string(ParamCharacter) - for _, r := range srcs { - if strings.Contains(r.Key, paramPrefix) || strings.Contains(r.Key, wildcardPrefix) ||strings.Contains(r.Key, restconfPrefix){ - r.Key += termChar - params = append(params, &record{Record: r}) - } else { - statics = append(statics, &record{Record: r}) - } - } - return statics, params -} - -// recordSlice represents a slice of Record for sort and implements the sort.Interface. -type recordSlice []*record - -// Len implements the sort.Interface.Len. -func (rs recordSlice) Len() int { - return len(rs) -} - -// Less implements the sort.Interface.Less. -func (rs recordSlice) Less(i, j int) bool { - return rs[i].Key < rs[j].Key -} - -// Swap implements the sort.Interface.Swap. -func (rs recordSlice) Swap(i, j int) { - rs[i], rs[j] = rs[j], rs[i] -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/denco/server.go b/vendor/github.com/go-openapi/runtime/middleware/denco/server.go deleted file mode 100644 index 0886713c1..000000000 --- a/vendor/github.com/go-openapi/runtime/middleware/denco/server.go +++ /dev/null @@ -1,106 +0,0 @@ -package denco - -import ( - "net/http" -) - -// Mux represents a multiplexer for HTTP request. -type Mux struct{} - -// NewMux returns a new Mux. -func NewMux() *Mux { - return &Mux{} -} - -// GET is shorthand of Mux.Handler("GET", path, handler). 
-func (m *Mux) GET(path string, handler HandlerFunc) Handler { - return m.Handler("GET", path, handler) -} - -// POST is shorthand of Mux.Handler("POST", path, handler). -func (m *Mux) POST(path string, handler HandlerFunc) Handler { - return m.Handler("POST", path, handler) -} - -// PUT is shorthand of Mux.Handler("PUT", path, handler). -func (m *Mux) PUT(path string, handler HandlerFunc) Handler { - return m.Handler("PUT", path, handler) -} - -// HEAD is shorthand of Mux.Handler("HEAD", path, handler). -func (m *Mux) HEAD(path string, handler HandlerFunc) Handler { - return m.Handler("HEAD", path, handler) -} - -// Handler returns a handler for HTTP method. -func (m *Mux) Handler(method, path string, handler HandlerFunc) Handler { - return Handler{ - Method: method, - Path: path, - Func: handler, - } -} - -// Build builds a http.Handler. -func (m *Mux) Build(handlers []Handler) (http.Handler, error) { - recordMap := make(map[string][]Record) - for _, h := range handlers { - recordMap[h.Method] = append(recordMap[h.Method], NewRecord(h.Path, h.Func)) - } - mux := newServeMux() - for m, records := range recordMap { - router := New() - if err := router.Build(records); err != nil { - return nil, err - } - mux.routers[m] = router - } - return mux, nil -} - -// Handler represents a handler of HTTP request. -type Handler struct { - // Method is an HTTP method. - Method string - - // Path is a routing path for handler. - Path string - - // Func is a function of handler of HTTP request. - Func HandlerFunc -} - -// The HandlerFunc type is aliased to type of handler function. -type HandlerFunc func(w http.ResponseWriter, r *http.Request, params Params) - -type serveMux struct { - routers map[string]*Router -} - -func newServeMux() *serveMux { - return &serveMux{ - routers: make(map[string]*Router), - } -} - -// ServeHTTP implements http.Handler interface. -func (mux *serveMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { - handler, params := mux.handler(r.Method, r.URL.Path) - handler(w, r, params) -} - -func (mux *serveMux) handler(method, path string) (HandlerFunc, []Param) { - if router, found := mux.routers[method]; found { - if handler, params, found := router.Lookup(path); found { - return handler.(HandlerFunc), params - } - } - return NotFound, nil -} - -// NotFound replies to the request with an HTTP 404 not found error. -// NotFound is called when unknown HTTP method or a handler not found. -// If you want to use the your own NotFound handler, please overwrite this variable. -var NotFound = func(w http.ResponseWriter, r *http.Request, _ Params) { - http.NotFound(w, r) -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/denco/util.go b/vendor/github.com/go-openapi/runtime/middleware/denco/util.go deleted file mode 100644 index edc1f6ab8..000000000 --- a/vendor/github.com/go-openapi/runtime/middleware/denco/util.go +++ /dev/null @@ -1,12 +0,0 @@ -package denco - -// NextSeparator returns an index of next separator in path. 
-func NextSeparator(path string, start int) int { - for start < len(path) { - if c := path[start]; c == '/' || c == TerminationCharacter { - break - } - start++ - } - return start -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/doc.go b/vendor/github.com/go-openapi/runtime/middleware/doc.go deleted file mode 100644 index eaf90606a..000000000 --- a/vendor/github.com/go-openapi/runtime/middleware/doc.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/*Package middleware provides the library with helper functions for serving swagger APIs. - -Pseudo middleware handler - - import ( - "net/http" - - "github.com/go-openapi/errors" - ) - - func newCompleteMiddleware(ctx *Context) http.Handler { - return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - // use context to lookup routes - if matched, ok := ctx.RouteInfo(r); ok { - - if matched.NeedsAuth() { - if _, err := ctx.Authorize(r, matched); err != nil { - ctx.Respond(rw, r, matched.Produces, matched, err) - return - } - } - - bound, validation := ctx.BindAndValidate(r, matched) - if validation != nil { - ctx.Respond(rw, r, matched.Produces, matched, validation) - return - } - - result, err := matched.Handler.Handle(bound) - if err != nil { - ctx.Respond(rw, r, matched.Produces, matched, err) - return - } - - ctx.Respond(rw, r, matched.Produces, matched, result) - return - } - - // Not found, check if it exists in the other methods first - if others := ctx.AllowedMethods(r); len(others) > 0 { - ctx.Respond(rw, r, ctx.spec.RequiredProduces(), nil, errors.MethodNotAllowed(r.Method, others)) - return - } - ctx.Respond(rw, r, ctx.spec.RequiredProduces(), nil, errors.NotFound("path %s was not found", r.URL.Path)) - }) - } -*/ -package middleware diff --git a/vendor/github.com/go-openapi/runtime/middleware/go18.go b/vendor/github.com/go-openapi/runtime/middleware/go18.go deleted file mode 100644 index 75c762c09..000000000 --- a/vendor/github.com/go-openapi/runtime/middleware/go18.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build go1.8 - -package middleware - -import "net/url" - -func pathUnescape(path string) (string, error) { - return url.PathUnescape(path) -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/header/header.go b/vendor/github.com/go-openapi/runtime/middleware/header/header.go deleted file mode 100644 index e069743e3..000000000 --- a/vendor/github.com/go-openapi/runtime/middleware/header/header.go +++ /dev/null @@ -1,329 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file or at -// https://developers.google.com/open-source/licenses/bsd. - -// this file was taken from the github.com/golang/gddo repository - -// Package header provides functions for parsing HTTP headers. -package header - -import ( - "net/http" - "strings" - "time" -) - -// Octet types from RFC 2616. 
-var octetTypes [256]octetType - -type octetType byte - -const ( - isToken octetType = 1 << iota - isSpace -) - -func init() { - // OCTET = - // CHAR = - // CTL = - // CR = - // LF = - // SP = - // HT = - // <"> = - // CRLF = CR LF - // LWS = [CRLF] 1*( SP | HT ) - // TEXT = - // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> - // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT - // token = 1* - // qdtext = > - - for c := 0; c < 256; c++ { - var t octetType - isCtl := c <= 31 || c == 127 - isChar := 0 <= c && c <= 127 - isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) - if strings.ContainsRune(" \t\r\n", rune(c)) { - t |= isSpace - } - if isChar && !isCtl && !isSeparator { - t |= isToken - } - octetTypes[c] = t - } -} - -// Copy returns a shallow copy of the header. -func Copy(header http.Header) http.Header { - h := make(http.Header) - for k, vs := range header { - h[k] = vs - } - return h -} - -var timeLayouts = []string{"Mon, 02 Jan 2006 15:04:05 GMT", time.RFC850, time.ANSIC} - -// ParseTime parses the header as time. The zero value is returned if the -// header is not present or there is an error parsing the -// header. -func ParseTime(header http.Header, key string) time.Time { - if s := header.Get(key); s != "" { - for _, layout := range timeLayouts { - if t, err := time.Parse(layout, s); err == nil { - return t.UTC() - } - } - } - return time.Time{} -} - -// ParseList parses a comma separated list of values. Commas are ignored in -// quoted strings. Quoted values are not unescaped or unquoted. Whitespace is -// trimmed. -func ParseList(header http.Header, key string) []string { - var result []string - for _, s := range header[http.CanonicalHeaderKey(key)] { - begin := 0 - end := 0 - escape := false - quote := false - for i := 0; i < len(s); i++ { - b := s[i] - switch { - case escape: - escape = false - end = i + 1 - case quote: - switch b { - case '\\': - escape = true - case '"': - quote = false - } - end = i + 1 - case b == '"': - quote = true - end = i + 1 - case octetTypes[b]&isSpace != 0: - if begin == end { - begin = i + 1 - end = begin - } - case b == ',': - if begin < end { - result = append(result, s[begin:end]) - } - begin = i + 1 - end = begin - default: - end = i + 1 - } - } - if begin < end { - result = append(result, s[begin:end]) - } - } - return result -} - -// ParseValueAndParams parses a comma separated list of values with optional -// semicolon separated name-value pairs. Content-Type and Content-Disposition -// headers are in this format. -func ParseValueAndParams(header http.Header, key string) (string, map[string]string) { - return parseValueAndParams(header.Get(key)) -} - -func parseValueAndParams(s string) (value string, params map[string]string) { - params = make(map[string]string) - value, s = expectTokenSlash(s) - if value == "" { - return - } - value = strings.ToLower(value) - s = skipSpace(s) - for strings.HasPrefix(s, ";") { - var pkey string - pkey, s = expectToken(skipSpace(s[1:])) - if pkey == "" { - return - } - if !strings.HasPrefix(s, "=") { - return - } - var pvalue string - pvalue, s = expectTokenOrQuoted(s[1:]) - if pvalue == "" { - return - } - pkey = strings.ToLower(pkey) - params[pkey] = pvalue - s = skipSpace(s) - } - return -} - -// AcceptSpec ... -type AcceptSpec struct { - Value string - Q float64 -} - -// ParseAccept2 ... 
-func ParseAccept2(header http.Header, key string) (specs []AcceptSpec) { - for _, en := range ParseList(header, key) { - v, p := parseValueAndParams(en) - var spec AcceptSpec - spec.Value = v - spec.Q = 1.0 - if p != nil { - if q, ok := p["q"]; ok { - spec.Q, _ = expectQuality(q) - } - } - if spec.Q < 0.0 { - continue - } - specs = append(specs, spec) - } - - return -} - -// ParseAccept parses Accept* headers. -func ParseAccept(header http.Header, key string) (specs []AcceptSpec) { -loop: - for _, s := range header[key] { - for { - var spec AcceptSpec - spec.Value, s = expectTokenSlash(s) - if spec.Value == "" { - continue loop - } - spec.Q = 1.0 - s = skipSpace(s) - if strings.HasPrefix(s, ";") { - s = skipSpace(s[1:]) - for !strings.HasPrefix(s, "q=") && s != "" && !strings.HasPrefix(s, ",") { - s = skipSpace(s[1:]) - } - if strings.HasPrefix(s, "q=") { - spec.Q, s = expectQuality(s[2:]) - if spec.Q < 0.0 { - continue loop - } - } - } - specs = append(specs, spec) - s = skipSpace(s) - if !strings.HasPrefix(s, ",") { - continue loop - } - s = skipSpace(s[1:]) - } - } - return -} - -func skipSpace(s string) (rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isSpace == 0 { - break - } - } - return s[i:] -} - -func expectToken(s string) (token, rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isToken == 0 { - break - } - } - return s[:i], s[i:] -} - -func expectTokenSlash(s string) (token, rest string) { - i := 0 - for ; i < len(s); i++ { - b := s[i] - if (octetTypes[b]&isToken == 0) && b != '/' { - break - } - } - return s[:i], s[i:] -} - -func expectQuality(s string) (q float64, rest string) { - switch { - case len(s) == 0: - return -1, "" - case s[0] == '0': - // q is already 0 - s = s[1:] - case s[0] == '1': - s = s[1:] - q = 1 - case s[0] == '.': - // q is already 0 - default: - return -1, "" - } - if !strings.HasPrefix(s, ".") { - return q, s - } - s = s[1:] - i := 0 - n := 0 - d := 1 - for ; i < len(s); i++ { - b := s[i] - if b < '0' || b > '9' { - break - } - n = n*10 + int(b) - '0' - d *= 10 - } - return q + float64(n)/float64(d), s[i:] -} - -func expectTokenOrQuoted(s string) (value string, rest string) { - if !strings.HasPrefix(s, "\"") { - return expectToken(s) - } - s = s[1:] - for i := 0; i < len(s); i++ { - switch s[i] { - case '"': - return s[:i], s[i+1:] - case '\\': - p := make([]byte, len(s)-1) - j := copy(p, s[:i]) - escape := true - for i = i + 1; i < len(s); i++ { - b := s[i] - switch { - case escape: - escape = false - p[j] = b - j++ - case b == '\\': - escape = true - case b == '"': - return string(p[:j]), s[i+1:] - default: - p[j] = b - j++ - } - } - return "", "" - } - } - return "", "" -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/negotiate.go b/vendor/github.com/go-openapi/runtime/middleware/negotiate.go deleted file mode 100644 index a9b6f27d3..000000000 --- a/vendor/github.com/go-openapi/runtime/middleware/negotiate.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file or at -// https://developers.google.com/open-source/licenses/bsd. - -// this file was taken from the github.com/golang/gddo repository - -package middleware - -import ( - "net/http" - "strings" - - "github.com/go-openapi/runtime/middleware/header" -) - -// NegotiateContentEncoding returns the best offered content encoding for the -// request's Accept-Encoding header. 
If two offers match with equal weight and -// then the offer earlier in the list is preferred. If no offers are -// acceptable, then "" is returned. -func NegotiateContentEncoding(r *http.Request, offers []string) string { - bestOffer := "identity" - bestQ := -1.0 - specs := header.ParseAccept(r.Header, "Accept-Encoding") - for _, offer := range offers { - for _, spec := range specs { - if spec.Q > bestQ && - (spec.Value == "*" || spec.Value == offer) { - bestQ = spec.Q - bestOffer = offer - } - } - } - if bestQ == 0 { - bestOffer = "" - } - return bestOffer -} - -// NegotiateContentType returns the best offered content type for the request's -// Accept header. If two offers match with equal weight, then the more specific -// offer is preferred. For example, text/* trumps */*. If two offers match -// with equal weight and specificity, then the offer earlier in the list is -// preferred. If no offers match, then defaultOffer is returned. -func NegotiateContentType(r *http.Request, offers []string, defaultOffer string) string { - bestOffer := defaultOffer - bestQ := -1.0 - bestWild := 3 - specs := header.ParseAccept(r.Header, "Accept") - for _, rawOffer := range offers { - offer := normalizeOffer(rawOffer) - // No Accept header: just return the first offer. - if len(specs) == 0 { - return rawOffer - } - for _, spec := range specs { - switch { - case spec.Q == 0.0: - // ignore - case spec.Q < bestQ: - // better match found - case spec.Value == "*/*": - if spec.Q > bestQ || bestWild > 2 { - bestQ = spec.Q - bestWild = 2 - bestOffer = rawOffer - } - case strings.HasSuffix(spec.Value, "/*"): - if strings.HasPrefix(offer, spec.Value[:len(spec.Value)-1]) && - (spec.Q > bestQ || bestWild > 1) { - bestQ = spec.Q - bestWild = 1 - bestOffer = rawOffer - } - default: - if spec.Value == offer && - (spec.Q > bestQ || bestWild > 0) { - bestQ = spec.Q - bestWild = 0 - bestOffer = rawOffer - } - } - } - } - return bestOffer -} - -func normalizeOffers(orig []string) (norm []string) { - for _, o := range orig { - norm = append(norm, normalizeOffer(o)) - } - return -} - -func normalizeOffer(orig string) string { - return strings.SplitN(orig, ";", 2)[0] -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/not_implemented.go b/vendor/github.com/go-openapi/runtime/middleware/not_implemented.go deleted file mode 100644 index bc6942a0f..000000000 --- a/vendor/github.com/go-openapi/runtime/middleware/not_implemented.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
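For context on the helpers deleted just above: a minimal sketch of how NegotiateContentType and NegotiateContentEncoding are typically called, assuming the upstream github.com/go-openapi/runtime/middleware import path; the handler name, offer lists, and port are illustrative only, not part of this change.

package main

import (
	"net/http"

	"github.com/go-openapi/runtime/middleware"
)

// chooseFormat is an illustrative handler: it picks a response media type and
// content encoding from the request's Accept / Accept-Encoding headers using
// the negotiation helpers defined in the removed negotiate.go.
func chooseFormat(rw http.ResponseWriter, r *http.Request) {
	// Best content type among what we can produce; falls back to JSON.
	ct := middleware.NegotiateContentType(r, []string{"application/json", "text/plain"}, "application/json")

	// Best content encoding among what we can emit; "" means none is acceptable.
	enc := middleware.NegotiateContentEncoding(r, []string{"gzip", "identity"})

	rw.Header().Set("Content-Type", ct)
	if enc != "" && enc != "identity" {
		rw.Header().Set("Content-Encoding", enc)
	}
	rw.WriteHeader(http.StatusOK)
}

func main() {
	_ = http.ListenAndServe(":8080", http.HandlerFunc(chooseFormat))
}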
- -package middleware - -import ( - "net/http" - - "github.com/go-openapi/runtime" -) - -type errorResp struct { - code int - response interface{} - headers http.Header -} - -func (e *errorResp) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { - for k, v := range e.headers { - for _, val := range v { - rw.Header().Add(k, val) - } - } - if e.code > 0 { - rw.WriteHeader(e.code) - } else { - rw.WriteHeader(http.StatusInternalServerError) - } - if err := producer.Produce(rw, e.response); err != nil { - Logger.Printf("failed to write error response: %v", err) - } -} - -// NotImplemented the error response when the response is not implemented -func NotImplemented(message string) Responder { - return Error(http.StatusNotImplemented, message) -} - -// Error creates a generic responder for returning errors, the data will be serialized -// with the matching producer for the request -func Error(code int, data interface{}, headers ...http.Header) Responder { - var hdr http.Header - for _, h := range headers { - for k, v := range h { - if hdr == nil { - hdr = make(http.Header) - } - hdr[k] = v - } - } - return &errorResp{ - code: code, - response: data, - headers: hdr, - } -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/operation.go b/vendor/github.com/go-openapi/runtime/middleware/operation.go deleted file mode 100644 index 1175a63cf..000000000 --- a/vendor/github.com/go-openapi/runtime/middleware/operation.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package middleware - -import "net/http" - -// NewOperationExecutor creates a context aware middleware that handles the operations after routing -func NewOperationExecutor(ctx *Context) http.Handler { - return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - // use context to lookup routes - route, rCtx, _ := ctx.RouteInfo(r) - if rCtx != nil { - r = rCtx - } - - route.Handler.ServeHTTP(rw, r) - }) -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/parameter.go b/vendor/github.com/go-openapi/runtime/middleware/parameter.go deleted file mode 100644 index 9aaf65958..000000000 --- a/vendor/github.com/go-openapi/runtime/middleware/parameter.go +++ /dev/null @@ -1,485 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
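A hedged sketch of how the Error responder removed above is normally consumed, assuming the upstream go-openapi/runtime packages; the handler name and payload are made up for illustration. In generated go-swagger code the Responder would usually be returned from an operation handler rather than written out directly.

package main

import (
	"net/http"

	"github.com/go-openapi/runtime"
	"github.com/go-openapi/runtime/middleware"
)

// notReady builds a Responder with the Error helper from the removed
// not_implemented.go and writes it with a JSON producer.
func notReady(rw http.ResponseWriter, r *http.Request) {
	resp := middleware.Error(http.StatusNotImplemented, map[string]string{
		"message": "operation not implemented yet",
	})
	resp.WriteResponse(rw, runtime.JSONProducer())
}

func main() {
	_ = http.ListenAndServe(":8080", http.HandlerFunc(notReady))
}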
- -package middleware - -import ( - "encoding" - "encoding/base64" - "fmt" - "io" - "net/http" - "reflect" - "strconv" - - "github.com/go-openapi/errors" - "github.com/go-openapi/spec" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/swag" - "github.com/go-openapi/validate" - - "github.com/go-openapi/runtime" -) - -const defaultMaxMemory = 32 << 20 - -var textUnmarshalType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem() - -func newUntypedParamBinder(param spec.Parameter, spec *spec.Swagger, formats strfmt.Registry) *untypedParamBinder { - binder := new(untypedParamBinder) - binder.Name = param.Name - binder.parameter = ¶m - binder.formats = formats - if param.In != "body" { - binder.validator = validate.NewParamValidator(¶m, formats) - } else { - binder.validator = validate.NewSchemaValidator(param.Schema, spec, param.Name, formats) - } - - return binder -} - -type untypedParamBinder struct { - parameter *spec.Parameter - formats strfmt.Registry - Name string - validator validate.EntityValidator -} - -func (p *untypedParamBinder) Type() reflect.Type { - return p.typeForSchema(p.parameter.Type, p.parameter.Format, p.parameter.Items) -} - -func (p *untypedParamBinder) typeForSchema(tpe, format string, items *spec.Items) reflect.Type { - switch tpe { - case "boolean": - return reflect.TypeOf(true) - - case "string": - if tt, ok := p.formats.GetType(format); ok { - return tt - } - return reflect.TypeOf("") - - case "integer": - switch format { - case "int8": - return reflect.TypeOf(int8(0)) - case "int16": - return reflect.TypeOf(int16(0)) - case "int32": - return reflect.TypeOf(int32(0)) - case "int64": - return reflect.TypeOf(int64(0)) - default: - return reflect.TypeOf(int64(0)) - } - - case "number": - switch format { - case "float": - return reflect.TypeOf(float32(0)) - case "double": - return reflect.TypeOf(float64(0)) - } - - case "array": - if items == nil { - return nil - } - itemsType := p.typeForSchema(items.Type, items.Format, items.Items) - if itemsType == nil { - return nil - } - return reflect.MakeSlice(reflect.SliceOf(itemsType), 0, 0).Type() - - case "file": - return reflect.TypeOf(&runtime.File{}).Elem() - - case "object": - return reflect.TypeOf(map[string]interface{}{}) - } - return nil -} - -func (p *untypedParamBinder) allowsMulti() bool { - return p.parameter.In == "query" || p.parameter.In == "formData" -} - -func (p *untypedParamBinder) readValue(values runtime.Gettable, target reflect.Value) ([]string, bool, bool, error) { - name, in, cf, tpe := p.parameter.Name, p.parameter.In, p.parameter.CollectionFormat, p.parameter.Type - if tpe == "array" { - if cf == "multi" { - if !p.allowsMulti() { - return nil, false, false, errors.InvalidCollectionFormat(name, in, cf) - } - vv, hasKey, _ := values.GetOK(name) - return vv, false, hasKey, nil - } - - v, hk, hv := values.GetOK(name) - if !hv { - return nil, false, hk, nil - } - d, c, e := p.readFormattedSliceFieldValue(v[len(v)-1], target) - return d, c, hk, e - } - - vv, hk, _ := values.GetOK(name) - return vv, false, hk, nil -} - -func (p *untypedParamBinder) Bind(request *http.Request, routeParams RouteParams, consumer runtime.Consumer, target reflect.Value) error { - // fmt.Println("binding", p.name, "as", p.Type()) - switch p.parameter.In { - case "query": - data, custom, hasKey, err := p.readValue(runtime.Values(request.URL.Query()), target) - if err != nil { - return err - } - if custom { - return nil - } - - return p.bindValue(data, hasKey, target) - - case "header": - data, custom, hasKey, err := 
p.readValue(runtime.Values(request.Header), target) - if err != nil { - return err - } - if custom { - return nil - } - return p.bindValue(data, hasKey, target) - - case "path": - data, custom, hasKey, err := p.readValue(routeParams, target) - if err != nil { - return err - } - if custom { - return nil - } - return p.bindValue(data, hasKey, target) - - case "formData": - var err error - var mt string - - mt, _, e := runtime.ContentType(request.Header) - if e != nil { - // because of the interface conversion go thinks the error is not nil - // so we first check for nil and then set the err var if it's not nil - err = e - } - - if err != nil { - return errors.InvalidContentType("", []string{"multipart/form-data", "application/x-www-form-urlencoded"}) - } - - if mt != "multipart/form-data" && mt != "application/x-www-form-urlencoded" { - return errors.InvalidContentType(mt, []string{"multipart/form-data", "application/x-www-form-urlencoded"}) - } - - if mt == "multipart/form-data" { - if err = request.ParseMultipartForm(defaultMaxMemory); err != nil { - return errors.NewParseError(p.Name, p.parameter.In, "", err) - } - } - - if err = request.ParseForm(); err != nil { - return errors.NewParseError(p.Name, p.parameter.In, "", err) - } - - if p.parameter.Type == "file" { - file, header, ffErr := request.FormFile(p.parameter.Name) - if ffErr != nil { - if p.parameter.Required { - return errors.NewParseError(p.Name, p.parameter.In, "", ffErr) - } else { - return nil - } - } - target.Set(reflect.ValueOf(runtime.File{Data: file, Header: header})) - return nil - } - - if request.MultipartForm != nil { - data, custom, hasKey, rvErr := p.readValue(runtime.Values(request.MultipartForm.Value), target) - if rvErr != nil { - return rvErr - } - if custom { - return nil - } - return p.bindValue(data, hasKey, target) - } - data, custom, hasKey, err := p.readValue(runtime.Values(request.PostForm), target) - if err != nil { - return err - } - if custom { - return nil - } - return p.bindValue(data, hasKey, target) - - case "body": - newValue := reflect.New(target.Type()) - if !runtime.HasBody(request) { - if p.parameter.Default != nil { - target.Set(reflect.ValueOf(p.parameter.Default)) - } - - return nil - } - if err := consumer.Consume(request.Body, newValue.Interface()); err != nil { - if err == io.EOF && p.parameter.Default != nil { - target.Set(reflect.ValueOf(p.parameter.Default)) - return nil - } - tpe := p.parameter.Type - if p.parameter.Format != "" { - tpe = p.parameter.Format - } - return errors.InvalidType(p.Name, p.parameter.In, tpe, nil) - } - target.Set(reflect.Indirect(newValue)) - return nil - default: - return errors.New(500, fmt.Sprintf("invalid parameter location %q", p.parameter.In)) - } -} - -func (p *untypedParamBinder) bindValue(data []string, hasKey bool, target reflect.Value) error { - if p.parameter.Type == "array" { - return p.setSliceFieldValue(target, p.parameter.Default, data, hasKey) - } - var d string - if len(data) > 0 { - d = data[len(data)-1] - } - return p.setFieldValue(target, p.parameter.Default, d, hasKey) -} - -func (p *untypedParamBinder) setFieldValue(target reflect.Value, defaultValue interface{}, data string, hasKey bool) error { - tpe := p.parameter.Type - if p.parameter.Format != "" { - tpe = p.parameter.Format - } - - if (!hasKey || (!p.parameter.AllowEmptyValue && data == "")) && p.parameter.Required && p.parameter.Default == nil { - return errors.Required(p.Name, p.parameter.In, data) - } - - ok, err := p.tryUnmarshaler(target, defaultValue, data) - if err != nil 
{ - return errors.InvalidType(p.Name, p.parameter.In, tpe, data) - } - if ok { - return nil - } - - defVal := reflect.Zero(target.Type()) - if defaultValue != nil { - defVal = reflect.ValueOf(defaultValue) - } - - if tpe == "byte" { - if data == "" { - if target.CanSet() { - target.SetBytes(defVal.Bytes()) - } - return nil - } - - b, err := base64.StdEncoding.DecodeString(data) - if err != nil { - b, err = base64.URLEncoding.DecodeString(data) - if err != nil { - return errors.InvalidType(p.Name, p.parameter.In, tpe, data) - } - } - if target.CanSet() { - target.SetBytes(b) - } - return nil - } - - switch target.Kind() { - case reflect.Bool: - if data == "" { - if target.CanSet() { - target.SetBool(defVal.Bool()) - } - return nil - } - b, err := swag.ConvertBool(data) - if err != nil { - return err - } - if target.CanSet() { - target.SetBool(b) - } - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - if data == "" { - if target.CanSet() { - rd := defVal.Convert(reflect.TypeOf(int64(0))) - target.SetInt(rd.Int()) - } - return nil - } - i, err := strconv.ParseInt(data, 10, 64) - if err != nil { - return errors.InvalidType(p.Name, p.parameter.In, tpe, data) - } - if target.OverflowInt(i) { - return errors.InvalidType(p.Name, p.parameter.In, tpe, data) - } - if target.CanSet() { - target.SetInt(i) - } - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - if data == "" { - if target.CanSet() { - rd := defVal.Convert(reflect.TypeOf(uint64(0))) - target.SetUint(rd.Uint()) - } - return nil - } - u, err := strconv.ParseUint(data, 10, 64) - if err != nil { - return errors.InvalidType(p.Name, p.parameter.In, tpe, data) - } - if target.OverflowUint(u) { - return errors.InvalidType(p.Name, p.parameter.In, tpe, data) - } - if target.CanSet() { - target.SetUint(u) - } - - case reflect.Float32, reflect.Float64: - if data == "" { - if target.CanSet() { - rd := defVal.Convert(reflect.TypeOf(float64(0))) - target.SetFloat(rd.Float()) - } - return nil - } - f, err := strconv.ParseFloat(data, 64) - if err != nil { - return errors.InvalidType(p.Name, p.parameter.In, tpe, data) - } - if target.OverflowFloat(f) { - return errors.InvalidType(p.Name, p.parameter.In, tpe, data) - } - if target.CanSet() { - target.SetFloat(f) - } - - case reflect.String: - value := data - if value == "" { - value = defVal.String() - } - // validate string - if target.CanSet() { - target.SetString(value) - } - - case reflect.Ptr: - if data == "" && defVal.Kind() == reflect.Ptr { - if target.CanSet() { - target.Set(defVal) - } - return nil - } - newVal := reflect.New(target.Type().Elem()) - if err := p.setFieldValue(reflect.Indirect(newVal), defVal, data, hasKey); err != nil { - return err - } - if target.CanSet() { - target.Set(newVal) - } - - default: - return errors.InvalidType(p.Name, p.parameter.In, tpe, data) - } - return nil -} - -func (p *untypedParamBinder) tryUnmarshaler(target reflect.Value, defaultValue interface{}, data string) (bool, error) { - if !target.CanSet() { - return false, nil - } - // When a type implements encoding.TextUnmarshaler we'll use that instead of reflecting some more - if reflect.PtrTo(target.Type()).Implements(textUnmarshalType) { - if defaultValue != nil && len(data) == 0 { - target.Set(reflect.ValueOf(defaultValue)) - return true, nil - } - value := reflect.New(target.Type()) - if err := value.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(data)); err != nil { - return true, err - } - target.Set(reflect.Indirect(value)) - 
return true, nil - } - return false, nil -} - -func (p *untypedParamBinder) readFormattedSliceFieldValue(data string, target reflect.Value) ([]string, bool, error) { - ok, err := p.tryUnmarshaler(target, p.parameter.Default, data) - if err != nil { - return nil, true, err - } - if ok { - return nil, true, nil - } - - return swag.SplitByFormat(data, p.parameter.CollectionFormat), false, nil -} - -func (p *untypedParamBinder) setSliceFieldValue(target reflect.Value, defaultValue interface{}, data []string, hasKey bool) error { - sz := len(data) - if (!hasKey || (!p.parameter.AllowEmptyValue && (sz == 0 || (sz == 1 && data[0] == "")))) && p.parameter.Required && defaultValue == nil { - return errors.Required(p.Name, p.parameter.In, data) - } - - defVal := reflect.Zero(target.Type()) - if defaultValue != nil { - defVal = reflect.ValueOf(defaultValue) - } - - if !target.CanSet() { - return nil - } - if sz == 0 { - target.Set(defVal) - return nil - } - - value := reflect.MakeSlice(reflect.SliceOf(target.Type().Elem()), sz, sz) - - for i := 0; i < sz; i++ { - if err := p.setFieldValue(value.Index(i), nil, data[i], hasKey); err != nil { - return err - } - } - - target.Set(value) - - return nil -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/pre_go18.go b/vendor/github.com/go-openapi/runtime/middleware/pre_go18.go deleted file mode 100644 index 03385251e..000000000 --- a/vendor/github.com/go-openapi/runtime/middleware/pre_go18.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build !go1.8 - -package middleware - -import "net/url" - -func pathUnescape(path string) (string, error) { - return url.QueryUnescape(path) -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/rapidoc.go b/vendor/github.com/go-openapi/runtime/middleware/rapidoc.go deleted file mode 100644 index 4be330d6d..000000000 --- a/vendor/github.com/go-openapi/runtime/middleware/rapidoc.go +++ /dev/null @@ -1,90 +0,0 @@ -package middleware - -import ( - "bytes" - "fmt" - "html/template" - "net/http" - "path" -) - -// RapiDocOpts configures the RapiDoc middlewares -type RapiDocOpts struct { - // BasePath for the UI path, defaults to: / - BasePath string - // Path combines with BasePath for the full UI path, defaults to: docs - Path string - // SpecURL the url to find the spec for - SpecURL string - // RapiDocURL for the js that generates the rapidoc site, defaults to: https://cdn.jsdelivr.net/npm/rapidoc/bundles/rapidoc.standalone.js - RapiDocURL string - // Title for the documentation site, default to: API documentation - Title string -} - -// EnsureDefaults in case some options are missing -func (r *RapiDocOpts) EnsureDefaults() { - if r.BasePath == "" { - r.BasePath = "/" - } - if r.Path == "" { - r.Path = "docs" - } - if r.SpecURL == "" { - r.SpecURL = "/swagger.json" - } - if r.RapiDocURL == "" { - r.RapiDocURL = rapidocLatest - } - if r.Title == "" { - r.Title = "API documentation" - } -} - -// RapiDoc creates a middleware to serve a documentation site for a swagger spec. -// This allows for altering the spec before starting the http listener. 
-// -func RapiDoc(opts RapiDocOpts, next http.Handler) http.Handler { - opts.EnsureDefaults() - - pth := path.Join(opts.BasePath, opts.Path) - tmpl := template.Must(template.New("rapidoc").Parse(rapidocTemplate)) - - buf := bytes.NewBuffer(nil) - _ = tmpl.Execute(buf, opts) - b := buf.Bytes() - - return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - if r.URL.Path == pth { - rw.Header().Set("Content-Type", "text/html; charset=utf-8") - rw.WriteHeader(http.StatusOK) - - _, _ = rw.Write(b) - return - } - - if next == nil { - rw.Header().Set("Content-Type", "text/plain") - rw.WriteHeader(http.StatusNotFound) - _, _ = rw.Write([]byte(fmt.Sprintf("%q not found", pth))) - return - } - next.ServeHTTP(rw, r) - }) -} - -const ( - rapidocLatest = "https://unpkg.com/rapidoc/dist/rapidoc-min.js" - rapidocTemplate = ` - - - {{ .Title }} - - - - - - - -` -) diff --git a/vendor/github.com/go-openapi/runtime/middleware/redoc.go b/vendor/github.com/go-openapi/runtime/middleware/redoc.go deleted file mode 100644 index 019c85429..000000000 --- a/vendor/github.com/go-openapi/runtime/middleware/redoc.go +++ /dev/null @@ -1,103 +0,0 @@ -package middleware - -import ( - "bytes" - "fmt" - "html/template" - "net/http" - "path" -) - -// RedocOpts configures the Redoc middlewares -type RedocOpts struct { - // BasePath for the UI path, defaults to: / - BasePath string - // Path combines with BasePath for the full UI path, defaults to: docs - Path string - // SpecURL the url to find the spec for - SpecURL string - // RedocURL for the js that generates the redoc site, defaults to: https://cdn.jsdelivr.net/npm/redoc/bundles/redoc.standalone.js - RedocURL string - // Title for the documentation site, default to: API documentation - Title string -} - -// EnsureDefaults in case some options are missing -func (r *RedocOpts) EnsureDefaults() { - if r.BasePath == "" { - r.BasePath = "/" - } - if r.Path == "" { - r.Path = "docs" - } - if r.SpecURL == "" { - r.SpecURL = "/swagger.json" - } - if r.RedocURL == "" { - r.RedocURL = redocLatest - } - if r.Title == "" { - r.Title = "API documentation" - } -} - -// Redoc creates a middleware to serve a documentation site for a swagger spec. -// This allows for altering the spec before starting the http listener. 
-// -func Redoc(opts RedocOpts, next http.Handler) http.Handler { - opts.EnsureDefaults() - - pth := path.Join(opts.BasePath, opts.Path) - tmpl := template.Must(template.New("redoc").Parse(redocTemplate)) - - buf := bytes.NewBuffer(nil) - _ = tmpl.Execute(buf, opts) - b := buf.Bytes() - - return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - if r.URL.Path == pth { - rw.Header().Set("Content-Type", "text/html; charset=utf-8") - rw.WriteHeader(http.StatusOK) - - _, _ = rw.Write(b) - return - } - - if next == nil { - rw.Header().Set("Content-Type", "text/plain") - rw.WriteHeader(http.StatusNotFound) - _, _ = rw.Write([]byte(fmt.Sprintf("%q not found", pth))) - return - } - next.ServeHTTP(rw, r) - }) -} - -const ( - redocLatest = "https://cdn.jsdelivr.net/npm/redoc/bundles/redoc.standalone.js" - redocTemplate = ` - - - {{ .Title }} - - - - - - - - - - - - - -` -) diff --git a/vendor/github.com/go-openapi/runtime/middleware/request.go b/vendor/github.com/go-openapi/runtime/middleware/request.go deleted file mode 100644 index 760c37861..000000000 --- a/vendor/github.com/go-openapi/runtime/middleware/request.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package middleware - -import ( - "net/http" - "reflect" - - "github.com/go-openapi/errors" - "github.com/go-openapi/spec" - "github.com/go-openapi/strfmt" - - "github.com/go-openapi/runtime" -) - -// UntypedRequestBinder binds and validates the data from a http request -type UntypedRequestBinder struct { - Spec *spec.Swagger - Parameters map[string]spec.Parameter - Formats strfmt.Registry - paramBinders map[string]*untypedParamBinder -} - -// NewUntypedRequestBinder creates a new binder for reading a request. 
-func NewUntypedRequestBinder(parameters map[string]spec.Parameter, spec *spec.Swagger, formats strfmt.Registry) *UntypedRequestBinder { - binders := make(map[string]*untypedParamBinder) - for fieldName, param := range parameters { - binders[fieldName] = newUntypedParamBinder(param, spec, formats) - } - return &UntypedRequestBinder{ - Parameters: parameters, - paramBinders: binders, - Spec: spec, - Formats: formats, - } -} - -// Bind perform the databinding and validation -func (o *UntypedRequestBinder) Bind(request *http.Request, routeParams RouteParams, consumer runtime.Consumer, data interface{}) error { - val := reflect.Indirect(reflect.ValueOf(data)) - isMap := val.Kind() == reflect.Map - var result []error - debugLog("binding %d parameters for %s %s", len(o.Parameters), request.Method, request.URL.EscapedPath()) - for fieldName, param := range o.Parameters { - binder := o.paramBinders[fieldName] - debugLog("binding parameter %s for %s %s", fieldName, request.Method, request.URL.EscapedPath()) - var target reflect.Value - if !isMap { - binder.Name = fieldName - target = val.FieldByName(fieldName) - } - - if isMap { - tpe := binder.Type() - if tpe == nil { - if param.Schema.Type.Contains("array") { - tpe = reflect.TypeOf([]interface{}{}) - } else { - tpe = reflect.TypeOf(map[string]interface{}{}) - } - } - target = reflect.Indirect(reflect.New(tpe)) - } - - if !target.IsValid() { - result = append(result, errors.New(500, "parameter name %q is an unknown field", binder.Name)) - continue - } - - if err := binder.Bind(request, routeParams, consumer, target); err != nil { - result = append(result, err) - continue - } - - if binder.validator != nil { - rr := binder.validator.Validate(target.Interface()) - if rr != nil && rr.HasErrors() { - result = append(result, rr.AsError()) - } - } - - if isMap { - val.SetMapIndex(reflect.ValueOf(param.Name), target) - } - } - - if len(result) > 0 { - return errors.CompositeValidationError(result...) - } - - return nil -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/router.go b/vendor/github.com/go-openapi/runtime/middleware/router.go deleted file mode 100644 index 5052031c8..000000000 --- a/vendor/github.com/go-openapi/runtime/middleware/router.go +++ /dev/null @@ -1,488 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package middleware - -import ( - "fmt" - "net/http" - fpath "path" - "regexp" - "strings" - - "github.com/go-openapi/runtime/security" - "github.com/go-openapi/swag" - - "github.com/go-openapi/analysis" - "github.com/go-openapi/errors" - "github.com/go-openapi/loads" - "github.com/go-openapi/spec" - "github.com/go-openapi/strfmt" - - "github.com/go-openapi/runtime" - "github.com/go-openapi/runtime/middleware/denco" -) - -// RouteParam is a object to capture route params in a framework agnostic way. 
-// implementations of the muxer should use these route params to communicate with the -// swagger framework -type RouteParam struct { - Name string - Value string -} - -// RouteParams the collection of route params -type RouteParams []RouteParam - -// Get gets the value for the route param for the specified key -func (r RouteParams) Get(name string) string { - vv, _, _ := r.GetOK(name) - if len(vv) > 0 { - return vv[len(vv)-1] - } - return "" -} - -// GetOK gets the value but also returns booleans to indicate if a key or value -// is present. This aids in validation and satisfies an interface in use there -// -// The returned values are: data, has key, has value -func (r RouteParams) GetOK(name string) ([]string, bool, bool) { - for _, p := range r { - if p.Name == name { - return []string{p.Value}, true, p.Value != "" - } - } - return nil, false, false -} - -// NewRouter creates a new context aware router middleware -func NewRouter(ctx *Context, next http.Handler) http.Handler { - if ctx.router == nil { - ctx.router = DefaultRouter(ctx.spec, ctx.api) - } - - return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - if _, rCtx, ok := ctx.RouteInfo(r); ok { - next.ServeHTTP(rw, rCtx) - return - } - - // Not found, check if it exists in the other methods first - if others := ctx.AllowedMethods(r); len(others) > 0 { - ctx.Respond(rw, r, ctx.analyzer.RequiredProduces(), nil, errors.MethodNotAllowed(r.Method, others)) - return - } - - ctx.Respond(rw, r, ctx.analyzer.RequiredProduces(), nil, errors.NotFound("path %s was not found", r.URL.EscapedPath())) - }) -} - -// RoutableAPI represents an interface for things that can serve -// as a provider of implementations for the swagger router -type RoutableAPI interface { - HandlerFor(string, string) (http.Handler, bool) - ServeErrorFor(string) func(http.ResponseWriter, *http.Request, error) - ConsumersFor([]string) map[string]runtime.Consumer - ProducersFor([]string) map[string]runtime.Producer - AuthenticatorsFor(map[string]spec.SecurityScheme) map[string]runtime.Authenticator - Authorizer() runtime.Authorizer - Formats() strfmt.Registry - DefaultProduces() string - DefaultConsumes() string -} - -// Router represents a swagger aware router -type Router interface { - Lookup(method, path string) (*MatchedRoute, bool) - OtherMethods(method, path string) []string -} - -type defaultRouteBuilder struct { - spec *loads.Document - analyzer *analysis.Spec - api RoutableAPI - records map[string][]denco.Record -} - -type defaultRouter struct { - spec *loads.Document - routers map[string]*denco.Router -} - -func newDefaultRouteBuilder(spec *loads.Document, api RoutableAPI) *defaultRouteBuilder { - return &defaultRouteBuilder{ - spec: spec, - analyzer: analysis.New(spec.Spec()), - api: api, - records: make(map[string][]denco.Record), - } -} - -// DefaultRouter creates a default implemenation of the router -func DefaultRouter(spec *loads.Document, api RoutableAPI) Router { - builder := newDefaultRouteBuilder(spec, api) - if spec != nil { - for method, paths := range builder.analyzer.Operations() { - for path, operation := range paths { - fp := fpath.Join(spec.BasePath(), path) - debugLog("adding route %s %s %q", method, fp, operation.ID) - builder.AddRoute(method, fp, operation) - } - } - } - return builder.Build() -} - -// RouteAuthenticator is an authenticator that can compose several authenticators together. -// It also knows when it contains an authenticator that allows for anonymous pass through. 
-// Contains a group of 1 or more authenticators that have a logical AND relationship -type RouteAuthenticator struct { - Authenticator map[string]runtime.Authenticator - Schemes []string - Scopes map[string][]string - allScopes []string - commonScopes []string - allowAnonymous bool -} - -func (ra *RouteAuthenticator) AllowsAnonymous() bool { - return ra.allowAnonymous -} - -// AllScopes returns a list of unique scopes that is the combination -// of all the scopes in the requirements -func (ra *RouteAuthenticator) AllScopes() []string { - return ra.allScopes -} - -// CommonScopes returns a list of unique scopes that are common in all the -// scopes in the requirements -func (ra *RouteAuthenticator) CommonScopes() []string { - return ra.commonScopes -} - -// Authenticate Authenticator interface implementation -func (ra *RouteAuthenticator) Authenticate(req *http.Request, route *MatchedRoute) (bool, interface{}, error) { - if ra.allowAnonymous { - route.Authenticator = ra - return true, nil, nil - } - // iterate in proper order - var lastResult interface{} - for _, scheme := range ra.Schemes { - if authenticator, ok := ra.Authenticator[scheme]; ok { - applies, princ, err := authenticator.Authenticate(&security.ScopedAuthRequest{ - Request: req, - RequiredScopes: ra.Scopes[scheme], - }) - if !applies { - return false, nil, nil - } - if err != nil { - route.Authenticator = ra - return true, nil, err - } - lastResult = princ - } - } - route.Authenticator = ra - return true, lastResult, nil -} - -func stringSliceUnion(slices ...[]string) []string { - unique := make(map[string]struct{}) - var result []string - for _, slice := range slices { - for _, entry := range slice { - if _, ok := unique[entry]; ok { - continue - } - unique[entry] = struct{}{} - result = append(result, entry) - } - } - return result -} - -func stringSliceIntersection(slices ...[]string) []string { - unique := make(map[string]int) - var intersection []string - - total := len(slices) - var emptyCnt int - for _, slice := range slices { - if len(slice) == 0 { - emptyCnt++ - continue - } - - for _, entry := range slice { - unique[entry]++ - if unique[entry] == total-emptyCnt { // this entry appeared in all the non-empty slices - intersection = append(intersection, entry) - } - } - } - - return intersection -} - -// RouteAuthenticators represents a group of authenticators that represent a logical OR -type RouteAuthenticators []RouteAuthenticator - -// AllowsAnonymous returns true when there is an authenticator that means optional auth -func (ras RouteAuthenticators) AllowsAnonymous() bool { - for _, ra := range ras { - if ra.AllowsAnonymous() { - return true - } - } - return false -} - -// Authenticate method implemention so this collection can be used as authenticator -func (ras RouteAuthenticators) Authenticate(req *http.Request, route *MatchedRoute) (bool, interface{}, error) { - var lastError error - var allowsAnon bool - var anonAuth RouteAuthenticator - - for _, ra := range ras { - if ra.AllowsAnonymous() { - anonAuth = ra - allowsAnon = true - continue - } - applies, usr, err := ra.Authenticate(req, route) - if !applies || err != nil || usr == nil { - if err != nil { - lastError = err - } - continue - } - return applies, usr, nil - } - - if allowsAnon && lastError == nil { - route.Authenticator = &anonAuth - return true, nil, lastError - } - return lastError != nil, nil, lastError -} - -type routeEntry struct { - PathPattern string - BasePath string - Operation *spec.Operation - Consumes []string - Consumers 
map[string]runtime.Consumer - Produces []string - Producers map[string]runtime.Producer - Parameters map[string]spec.Parameter - Handler http.Handler - Formats strfmt.Registry - Binder *UntypedRequestBinder - Authenticators RouteAuthenticators - Authorizer runtime.Authorizer -} - -// MatchedRoute represents the route that was matched in this request -type MatchedRoute struct { - routeEntry - Params RouteParams - Consumer runtime.Consumer - Producer runtime.Producer - Authenticator *RouteAuthenticator -} - -// HasAuth returns true when the route has a security requirement defined -func (m *MatchedRoute) HasAuth() bool { - return len(m.Authenticators) > 0 -} - -// NeedsAuth returns true when the request still -// needs to perform authentication -func (m *MatchedRoute) NeedsAuth() bool { - return m.HasAuth() && m.Authenticator == nil -} - -func (d *defaultRouter) Lookup(method, path string) (*MatchedRoute, bool) { - mth := strings.ToUpper(method) - debugLog("looking up route for %s %s", method, path) - if Debug { - if len(d.routers) == 0 { - debugLog("there are no known routers") - } - for meth := range d.routers { - debugLog("got a router for %s", meth) - } - } - if router, ok := d.routers[mth]; ok { - if m, rp, ok := router.Lookup(fpath.Clean(path)); ok && m != nil { - if entry, ok := m.(*routeEntry); ok { - debugLog("found a route for %s %s with %d parameters", method, path, len(entry.Parameters)) - var params RouteParams - for _, p := range rp { - v, err := pathUnescape(p.Value) - if err != nil { - debugLog("failed to escape %q: %v", p.Value, err) - v = p.Value - } - // a workaround to handle fragment/composing parameters until they are supported in denco router - // check if this parameter is a fragment within a path segment - if xpos := strings.Index(entry.PathPattern, fmt.Sprintf("{%s}", p.Name)) + len(p.Name) + 2; xpos < len(entry.PathPattern) && entry.PathPattern[xpos] != '/' { - // extract fragment parameters - ep := strings.Split(entry.PathPattern[xpos:], "/")[0] - pnames, pvalues := decodeCompositParams(p.Name, v, ep, nil, nil) - for i, pname := range pnames { - params = append(params, RouteParam{Name: pname, Value: pvalues[i]}) - } - } else { - // use the parameter directly - params = append(params, RouteParam{Name: p.Name, Value: v}) - } - } - return &MatchedRoute{routeEntry: *entry, Params: params}, true - } - } else { - debugLog("couldn't find a route by path for %s %s", method, path) - } - } else { - debugLog("couldn't find a route by method for %s %s", method, path) - } - return nil, false -} - -func (d *defaultRouter) OtherMethods(method, path string) []string { - mn := strings.ToUpper(method) - var methods []string - for k, v := range d.routers { - if k != mn { - if _, _, ok := v.Lookup(fpath.Clean(path)); ok { - methods = append(methods, k) - continue - } - } - } - return methods -} - -// convert swagger parameters per path segment into a denco parameter as multiple parameters per segment are not supported in denco -var pathConverter = regexp.MustCompile(`{(.+?)}([^/]*)`) - -func decodeCompositParams(name string, value string, pattern string, names []string, values []string) ([]string, []string) { - pleft := strings.Index(pattern, "{") - names = append(names, name) - if pleft < 0 { - if strings.HasSuffix(value, pattern) { - values = append(values, value[:len(value)-len(pattern)]) - } else { - values = append(values, "") - } - } else { - toskip := pattern[:pleft] - pright := strings.Index(pattern, "}") - vright := strings.Index(value, toskip) - if vright >= 0 { - values = 
append(values, value[:vright]) - } else { - values = append(values, "") - value = "" - } - return decodeCompositParams(pattern[pleft+1:pright], value[vright+len(toskip):], pattern[pright+1:], names, values) - } - return names, values -} - -func (d *defaultRouteBuilder) AddRoute(method, path string, operation *spec.Operation) { - mn := strings.ToUpper(method) - - bp := fpath.Clean(d.spec.BasePath()) - if len(bp) > 0 && bp[len(bp)-1] == '/' { - bp = bp[:len(bp)-1] - } - - debugLog("operation: %#v", *operation) - if handler, ok := d.api.HandlerFor(method, strings.TrimPrefix(path, bp)); ok { - consumes := d.analyzer.ConsumesFor(operation) - produces := d.analyzer.ProducesFor(operation) - parameters := d.analyzer.ParamsFor(method, strings.TrimPrefix(path, bp)) - - // add API defaults if not part of the spec - if defConsumes := d.api.DefaultConsumes(); defConsumes != "" && !swag.ContainsStringsCI(consumes, defConsumes) { - consumes = append(consumes, defConsumes) - } - - if defProduces := d.api.DefaultProduces(); defProduces != "" && !swag.ContainsStringsCI(produces, defProduces) { - produces = append(produces, defProduces) - } - - record := denco.NewRecord(pathConverter.ReplaceAllString(path, ":$1"), &routeEntry{ - BasePath: bp, - PathPattern: path, - Operation: operation, - Handler: handler, - Consumes: consumes, - Produces: produces, - Consumers: d.api.ConsumersFor(normalizeOffers(consumes)), - Producers: d.api.ProducersFor(normalizeOffers(produces)), - Parameters: parameters, - Formats: d.api.Formats(), - Binder: NewUntypedRequestBinder(parameters, d.spec.Spec(), d.api.Formats()), - Authenticators: d.buildAuthenticators(operation), - Authorizer: d.api.Authorizer(), - }) - d.records[mn] = append(d.records[mn], record) - } -} - -func (d *defaultRouteBuilder) buildAuthenticators(operation *spec.Operation) RouteAuthenticators { - requirements := d.analyzer.SecurityRequirementsFor(operation) - var auths []RouteAuthenticator - for _, reqs := range requirements { - var schemes []string - scopes := make(map[string][]string, len(reqs)) - var scopeSlices [][]string - for _, req := range reqs { - schemes = append(schemes, req.Name) - scopes[req.Name] = req.Scopes - scopeSlices = append(scopeSlices, req.Scopes) - } - - definitions := d.analyzer.SecurityDefinitionsForRequirements(reqs) - authenticators := d.api.AuthenticatorsFor(definitions) - auths = append(auths, RouteAuthenticator{ - Authenticator: authenticators, - Schemes: schemes, - Scopes: scopes, - allScopes: stringSliceUnion(scopeSlices...), - commonScopes: stringSliceIntersection(scopeSlices...), - allowAnonymous: len(reqs) == 1 && reqs[0].Name == "", - }) - } - return auths -} - -func (d *defaultRouteBuilder) Build() *defaultRouter { - routers := make(map[string]*denco.Router) - for method, records := range d.records { - router := denco.New() - _ = router.Build(records) - routers[method] = router - } - return &defaultRouter{ - spec: d.spec, - routers: routers, - } -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/security.go b/vendor/github.com/go-openapi/runtime/middleware/security.go deleted file mode 100644 index 2b061caef..000000000 --- a/vendor/github.com/go-openapi/runtime/middleware/security.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package middleware - -import "net/http" - -func newSecureAPI(ctx *Context, next http.Handler) http.Handler { - return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - route, rCtx, _ := ctx.RouteInfo(r) - if rCtx != nil { - r = rCtx - } - if route != nil && !route.NeedsAuth() { - next.ServeHTTP(rw, r) - return - } - - _, rCtx, err := ctx.Authorize(r, route) - if err != nil { - ctx.Respond(rw, r, route.Produces, route, err) - return - } - r = rCtx - - next.ServeHTTP(rw, r) - }) -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/spec.go b/vendor/github.com/go-openapi/runtime/middleware/spec.go deleted file mode 100644 index f02914298..000000000 --- a/vendor/github.com/go-openapi/runtime/middleware/spec.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package middleware - -import ( - "net/http" - "path" -) - -// Spec creates a middleware to serve a swagger spec. -// This allows for altering the spec before starting the http listener. 
-// This can be useful if you want to serve the swagger spec from another path than /swagger.json -// -func Spec(basePath string, b []byte, next http.Handler) http.Handler { - if basePath == "" { - basePath = "/" - } - pth := path.Join(basePath, "swagger.json") - - return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - if r.URL.Path == pth { - rw.Header().Set("Content-Type", "application/json") - rw.WriteHeader(http.StatusOK) - //#nosec - _, _ = rw.Write(b) - return - } - - if next == nil { - rw.Header().Set("Content-Type", "application/json") - rw.WriteHeader(http.StatusNotFound) - return - } - next.ServeHTTP(rw, r) - }) -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/swaggerui.go b/vendor/github.com/go-openapi/runtime/middleware/swaggerui.go deleted file mode 100644 index b4dea29e4..000000000 --- a/vendor/github.com/go-openapi/runtime/middleware/swaggerui.go +++ /dev/null @@ -1,168 +0,0 @@ -package middleware - -import ( - "bytes" - "fmt" - "html/template" - "net/http" - "path" -) - -// SwaggerUIOpts configures the Swaggerui middlewares -type SwaggerUIOpts struct { - // BasePath for the UI path, defaults to: / - BasePath string - // Path combines with BasePath for the full UI path, defaults to: docs - Path string - // SpecURL the url to find the spec for - SpecURL string - // OAuthCallbackURL the url called after OAuth2 login - OAuthCallbackURL string - - // The three components needed to embed swagger-ui - SwaggerURL string - SwaggerPresetURL string - SwaggerStylesURL string - - Favicon32 string - Favicon16 string - - // Title for the documentation site, default to: API documentation - Title string -} - -// EnsureDefaults in case some options are missing -func (r *SwaggerUIOpts) EnsureDefaults() { - if r.BasePath == "" { - r.BasePath = "/" - } - if r.Path == "" { - r.Path = "docs" - } - if r.SpecURL == "" { - r.SpecURL = "/swagger.json" - } - if r.OAuthCallbackURL == "" { - r.OAuthCallbackURL = path.Join(r.BasePath, r.Path, "oauth2-callback") - } - if r.SwaggerURL == "" { - r.SwaggerURL = swaggerLatest - } - if r.SwaggerPresetURL == "" { - r.SwaggerPresetURL = swaggerPresetLatest - } - if r.SwaggerStylesURL == "" { - r.SwaggerStylesURL = swaggerStylesLatest - } - if r.Favicon16 == "" { - r.Favicon16 = swaggerFavicon16Latest - } - if r.Favicon32 == "" { - r.Favicon32 = swaggerFavicon32Latest - } - if r.Title == "" { - r.Title = "API documentation" - } -} - -// SwaggerUI creates a middleware to serve a documentation site for a swagger spec. -// This allows for altering the spec before starting the http listener. 
-func SwaggerUI(opts SwaggerUIOpts, next http.Handler) http.Handler { - opts.EnsureDefaults() - - pth := path.Join(opts.BasePath, opts.Path) - tmpl := template.Must(template.New("swaggerui").Parse(swaggeruiTemplate)) - - buf := bytes.NewBuffer(nil) - _ = tmpl.Execute(buf, &opts) - b := buf.Bytes() - - return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - if path.Join(r.URL.Path) == pth { - rw.Header().Set("Content-Type", "text/html; charset=utf-8") - rw.WriteHeader(http.StatusOK) - - _, _ = rw.Write(b) - return - } - - if next == nil { - rw.Header().Set("Content-Type", "text/plain") - rw.WriteHeader(http.StatusNotFound) - _, _ = rw.Write([]byte(fmt.Sprintf("%q not found", pth))) - return - } - next.ServeHTTP(rw, r) - }) -} - -const ( - swaggerLatest = "https://unpkg.com/swagger-ui-dist/swagger-ui-bundle.js" - swaggerPresetLatest = "https://unpkg.com/swagger-ui-dist/swagger-ui-standalone-preset.js" - swaggerStylesLatest = "https://unpkg.com/swagger-ui-dist/swagger-ui.css" - swaggerFavicon32Latest = "https://unpkg.com/swagger-ui-dist/favicon-32x32.png" - swaggerFavicon16Latest = "https://unpkg.com/swagger-ui-dist/favicon-16x16.png" - swaggeruiTemplate = ` - - - - - {{ .Title }} - - - - - - - - -
- - - - - - -` -) diff --git a/vendor/github.com/go-openapi/runtime/middleware/swaggerui_oauth2.go b/vendor/github.com/go-openapi/runtime/middleware/swaggerui_oauth2.go deleted file mode 100644 index 576f6003f..000000000 --- a/vendor/github.com/go-openapi/runtime/middleware/swaggerui_oauth2.go +++ /dev/null @@ -1,122 +0,0 @@ -package middleware - -import ( - "bytes" - "fmt" - "net/http" - "path" - "text/template" -) - -func SwaggerUIOAuth2Callback(opts SwaggerUIOpts, next http.Handler) http.Handler { - opts.EnsureDefaults() - - pth := opts.OAuthCallbackURL - tmpl := template.Must(template.New("swaggeroauth").Parse(swaggerOAuthTemplate)) - - buf := bytes.NewBuffer(nil) - _ = tmpl.Execute(buf, &opts) - b := buf.Bytes() - - return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - if path.Join(r.URL.Path) == pth { - rw.Header().Set("Content-Type", "text/html; charset=utf-8") - rw.WriteHeader(http.StatusOK) - - _, _ = rw.Write(b) - return - } - - if next == nil { - rw.Header().Set("Content-Type", "text/plain") - rw.WriteHeader(http.StatusNotFound) - _, _ = rw.Write([]byte(fmt.Sprintf("%q not found", pth))) - return - } - next.ServeHTTP(rw, r) - }) -} - -const ( - swaggerOAuthTemplate = ` - - - - {{ .Title }} - - - - - -` -) diff --git a/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go b/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go deleted file mode 100644 index 39a85f7d9..000000000 --- a/vendor/github.com/go-openapi/runtime/middleware/untyped/api.go +++ /dev/null @@ -1,286 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
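A minimal sketch of how the Spec and SwaggerUI middlewares removed above are typically chained, assuming the upstream go-openapi/runtime/middleware import path; the sample spec document and the fallback handler are placeholders. Each middleware serves its own path and falls through to the next handler otherwise.

package main

import (
	"net/http"

	"github.com/go-openapi/runtime/middleware"
)

// serveDocs wires the documentation middlewares: Spec serves the raw
// swagger.json and SwaggerUI serves the UI at /docs, each delegating to the
// next handler when the request path does not match.
func serveDocs(specJSON []byte, api http.Handler) http.Handler {
	// Innermost handler: the actual API.
	h := middleware.Spec("/", specJSON, api)
	// Zero-value options rely on EnsureDefaults: SpecURL /swagger.json, UI at /docs.
	h = middleware.SwaggerUI(middleware.SwaggerUIOpts{}, h)
	return h
}

func main() {
	spec := []byte(`{"swagger":"2.0","info":{"title":"demo","version":"1.0"},"paths":{}}`)
	_ = http.ListenAndServe(":8080", serveDocs(spec, http.NotFoundHandler()))
}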
- -package untyped - -import ( - "fmt" - "net/http" - "sort" - "strings" - - "github.com/go-openapi/analysis" - "github.com/go-openapi/errors" - "github.com/go-openapi/loads" - "github.com/go-openapi/spec" - "github.com/go-openapi/strfmt" - - "github.com/go-openapi/runtime" -) - -// NewAPI creates the default untyped API -func NewAPI(spec *loads.Document) *API { - var an *analysis.Spec - if spec != nil && spec.Spec() != nil { - an = analysis.New(spec.Spec()) - } - api := &API{ - spec: spec, - analyzer: an, - consumers: make(map[string]runtime.Consumer, 10), - producers: make(map[string]runtime.Producer, 10), - authenticators: make(map[string]runtime.Authenticator), - operations: make(map[string]map[string]runtime.OperationHandler), - ServeError: errors.ServeError, - Models: make(map[string]func() interface{}), - formats: strfmt.NewFormats(), - } - return api.WithJSONDefaults() -} - -// API represents an untyped mux for a swagger spec -type API struct { - spec *loads.Document - analyzer *analysis.Spec - DefaultProduces string - DefaultConsumes string - consumers map[string]runtime.Consumer - producers map[string]runtime.Producer - authenticators map[string]runtime.Authenticator - authorizer runtime.Authorizer - operations map[string]map[string]runtime.OperationHandler - ServeError func(http.ResponseWriter, *http.Request, error) - Models map[string]func() interface{} - formats strfmt.Registry -} - -// WithJSONDefaults loads the json defaults for this api -func (d *API) WithJSONDefaults() *API { - d.DefaultConsumes = runtime.JSONMime - d.DefaultProduces = runtime.JSONMime - d.consumers[runtime.JSONMime] = runtime.JSONConsumer() - d.producers[runtime.JSONMime] = runtime.JSONProducer() - return d -} - -// WithoutJSONDefaults clears the json defaults for this api -func (d *API) WithoutJSONDefaults() *API { - d.DefaultConsumes = "" - d.DefaultProduces = "" - delete(d.consumers, runtime.JSONMime) - delete(d.producers, runtime.JSONMime) - return d -} - -// Formats returns the registered string formats -func (d *API) Formats() strfmt.Registry { - if d.formats == nil { - d.formats = strfmt.NewFormats() - } - return d.formats -} - -// RegisterFormat registers a custom format validator -func (d *API) RegisterFormat(name string, format strfmt.Format, validator strfmt.Validator) { - if d.formats == nil { - d.formats = strfmt.NewFormats() - } - d.formats.Add(name, format, validator) -} - -// RegisterAuth registers an auth handler in this api -func (d *API) RegisterAuth(scheme string, handler runtime.Authenticator) { - if d.authenticators == nil { - d.authenticators = make(map[string]runtime.Authenticator) - } - d.authenticators[scheme] = handler -} - -// RegisterAuthorizer registers an authorizer handler in this api -func (d *API) RegisterAuthorizer(handler runtime.Authorizer) { - d.authorizer = handler -} - -// RegisterConsumer registers a consumer for a media type. 
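The untyped API removed in this hunk is a registry keyed by the swagger spec: consumers, producers, authenticators and operation handlers are registered against what the document declares. A hedged sketch of typical upstream usage, assuming swagger.json declares a GET /healthz operation:

```go
package main

import (
	"log"

	"github.com/go-openapi/loads"
	"github.com/go-openapi/runtime"
	"github.com/go-openapi/runtime/middleware/untyped"
)

func main() {
	doc, err := loads.Spec("swagger.json")
	if err != nil {
		log.Fatal(err)
	}

	api := untyped.NewAPI(doc) // JSON consumer/producer registered by default

	// Register a handler for an operation declared in the spec.
	api.RegisterOperation("get", "/healthz", runtime.OperationHandlerFunc(
		func(params interface{}) (interface{}, error) {
			return map[string]string{"status": "ok"}, nil
		}))

	// Validate reports anything the spec requires that was never registered
	// (and vice versa) before the server starts.
	if err := api.Validate(); err != nil {
		log.Fatal(err)
	}
}
```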
-func (d *API) RegisterConsumer(mediaType string, handler runtime.Consumer) { - if d.consumers == nil { - d.consumers = make(map[string]runtime.Consumer, 10) - } - d.consumers[strings.ToLower(mediaType)] = handler -} - -// RegisterProducer registers a producer for a media type -func (d *API) RegisterProducer(mediaType string, handler runtime.Producer) { - if d.producers == nil { - d.producers = make(map[string]runtime.Producer, 10) - } - d.producers[strings.ToLower(mediaType)] = handler -} - -// RegisterOperation registers an operation handler for an operation name -func (d *API) RegisterOperation(method, path string, handler runtime.OperationHandler) { - if d.operations == nil { - d.operations = make(map[string]map[string]runtime.OperationHandler, 30) - } - um := strings.ToUpper(method) - if b, ok := d.operations[um]; !ok || b == nil { - d.operations[um] = make(map[string]runtime.OperationHandler) - } - d.operations[um][path] = handler -} - -// OperationHandlerFor returns the operation handler for the specified id if it can be found -func (d *API) OperationHandlerFor(method, path string) (runtime.OperationHandler, bool) { - if d.operations == nil { - return nil, false - } - if pi, ok := d.operations[strings.ToUpper(method)]; ok { - h, ok := pi[path] - return h, ok - } - return nil, false -} - -// ConsumersFor gets the consumers for the specified media types -func (d *API) ConsumersFor(mediaTypes []string) map[string]runtime.Consumer { - result := make(map[string]runtime.Consumer) - for _, mt := range mediaTypes { - if consumer, ok := d.consumers[mt]; ok { - result[mt] = consumer - } - } - return result -} - -// ProducersFor gets the producers for the specified media types -func (d *API) ProducersFor(mediaTypes []string) map[string]runtime.Producer { - result := make(map[string]runtime.Producer) - for _, mt := range mediaTypes { - if producer, ok := d.producers[mt]; ok { - result[mt] = producer - } - } - return result -} - -// AuthenticatorsFor gets the authenticators for the specified security schemes -func (d *API) AuthenticatorsFor(schemes map[string]spec.SecurityScheme) map[string]runtime.Authenticator { - result := make(map[string]runtime.Authenticator) - for k := range schemes { - if a, ok := d.authenticators[k]; ok { - result[k] = a - } - } - return result -} - -// Authorizer returns the registered authorizer -func (d *API) Authorizer() runtime.Authorizer { - return d.authorizer -} - -// Validate validates this API for any missing items -func (d *API) Validate() error { - return d.validate() -} - -// validateWith validates the registrations in this API against the provided spec analyzer -func (d *API) validate() error { - var consumes []string - for k := range d.consumers { - consumes = append(consumes, k) - } - - var produces []string - for k := range d.producers { - produces = append(produces, k) - } - - var authenticators []string - for k := range d.authenticators { - authenticators = append(authenticators, k) - } - - var operations []string - for m, v := range d.operations { - for p := range v { - operations = append(operations, fmt.Sprintf("%s %s", strings.ToUpper(m), p)) - } - } - - var definedAuths []string - for k := range d.spec.Spec().SecurityDefinitions { - definedAuths = append(definedAuths, k) - } - - if err := d.verify("consumes", consumes, d.analyzer.RequiredConsumes()); err != nil { - return err - } - if err := d.verify("produces", produces, d.analyzer.RequiredProduces()); err != nil { - return err - } - if err := d.verify("operation", operations, 
d.analyzer.OperationMethodPaths()); err != nil { - return err - } - - requiredAuths := d.analyzer.RequiredSecuritySchemes() - if err := d.verify("auth scheme", authenticators, requiredAuths); err != nil { - return err - } - if err := d.verify("security definitions", definedAuths, requiredAuths); err != nil { - return err - } - return nil -} - -func (d *API) verify(name string, registrations []string, expectations []string) error { - sort.Strings(registrations) - sort.Strings(expectations) - - expected := map[string]struct{}{} - seen := map[string]struct{}{} - - for _, v := range expectations { - expected[v] = struct{}{} - } - - var unspecified []string - for _, v := range registrations { - seen[v] = struct{}{} - if _, ok := expected[v]; !ok { - unspecified = append(unspecified, v) - } - } - - for k := range seen { - delete(expected, k) - } - - var unregistered []string - for k := range expected { - unregistered = append(unregistered, k) - } - sort.Strings(unspecified) - sort.Strings(unregistered) - - if len(unregistered) > 0 || len(unspecified) > 0 { - return &errors.APIVerificationFailed{ - Section: name, - MissingSpecification: unspecified, - MissingRegistration: unregistered, - } - } - - return nil -} diff --git a/vendor/github.com/go-openapi/runtime/middleware/validation.go b/vendor/github.com/go-openapi/runtime/middleware/validation.go deleted file mode 100644 index 1f0135b57..000000000 --- a/vendor/github.com/go-openapi/runtime/middleware/validation.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
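The verify helper above reduces to a two-way set difference: registrations missing from the spec are reported as unspecified, spec requirements without a registration as unregistered. A standalone sketch of that check:

```go
package main

import (
	"fmt"
	"sort"
)

// diff mirrors the comparison done by verify: it splits the inputs into
// values only present in registrations and values only present in expectations.
func diff(registrations, expectations []string) (unspecified, unregistered []string) {
	expected := map[string]struct{}{}
	for _, v := range expectations {
		expected[v] = struct{}{}
	}
	for _, v := range registrations {
		if _, ok := expected[v]; !ok {
			unspecified = append(unspecified, v)
		}
		delete(expected, v) // anything left over was never registered
	}
	for k := range expected {
		unregistered = append(unregistered, k)
	}
	sort.Strings(unspecified)
	sort.Strings(unregistered)
	return unspecified, unregistered
}

func main() {
	unspec, unreg := diff(
		[]string{"application/json", "text/plain"},      // registered consumers
		[]string{"application/json", "application/xml"}, // required by the spec
	)
	fmt.Println(unspec, unreg) // [text/plain] [application/xml]
}
```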
- -package middleware - -import ( - "mime" - "net/http" - "strings" - - "github.com/go-openapi/errors" - "github.com/go-openapi/swag" - - "github.com/go-openapi/runtime" -) - -type validation struct { - context *Context - result []error - request *http.Request - route *MatchedRoute - bound map[string]interface{} -} - -// ContentType validates the content type of a request -func validateContentType(allowed []string, actual string) error { - debugLog("validating content type for %q against [%s]", actual, strings.Join(allowed, ", ")) - if len(allowed) == 0 { - return nil - } - mt, _, err := mime.ParseMediaType(actual) - if err != nil { - return errors.InvalidContentType(actual, allowed) - } - if swag.ContainsStringsCI(allowed, mt) { - return nil - } - if swag.ContainsStringsCI(allowed, "*/*") { - return nil - } - parts := strings.Split(actual, "/") - if len(parts) == 2 && swag.ContainsStringsCI(allowed, parts[0]+"/*") { - return nil - } - return errors.InvalidContentType(actual, allowed) -} - -func validateRequest(ctx *Context, request *http.Request, route *MatchedRoute) *validation { - debugLog("validating request %s %s", request.Method, request.URL.EscapedPath()) - validate := &validation{ - context: ctx, - request: request, - route: route, - bound: make(map[string]interface{}), - } - - validate.contentType() - if len(validate.result) == 0 { - validate.responseFormat() - } - if len(validate.result) == 0 { - validate.parameters() - } - - return validate -} - -func (v *validation) parameters() { - debugLog("validating request parameters for %s %s", v.request.Method, v.request.URL.EscapedPath()) - if result := v.route.Binder.Bind(v.request, v.route.Params, v.route.Consumer, v.bound); result != nil { - if result.Error() == "validation failure list" { - for _, e := range result.(*errors.Validation).Value.([]interface{}) { - v.result = append(v.result, e.(error)) - } - return - } - v.result = append(v.result, result) - } -} - -func (v *validation) contentType() { - if len(v.result) == 0 && runtime.HasBody(v.request) { - debugLog("validating body content type for %s %s", v.request.Method, v.request.URL.EscapedPath()) - ct, _, req, err := v.context.ContentType(v.request) - if err != nil { - v.result = append(v.result, err) - } else { - v.request = req - } - - if len(v.result) == 0 { - if err := validateContentType(v.route.Consumes, ct); err != nil { - v.result = append(v.result, err) - } - } - if ct != "" && v.route.Consumer == nil { - cons, ok := v.route.Consumers[ct] - if !ok { - v.result = append(v.result, errors.New(500, "no consumer registered for %s", ct)) - } else { - v.route.Consumer = cons - } - } - } -} - -func (v *validation) responseFormat() { - // if the route provides values for Produces and no format could be identify then return an error. - // if the route does not specify values for Produces then treat request as valid since the API designer - // choose not to specify the format for responses. 
- if str, rCtx := v.context.ResponseFormat(v.request, v.route.Produces); str == "" && len(v.route.Produces) > 0 { - v.request = rCtx - v.result = append(v.result, errors.InvalidResponseFormat(v.request.Header.Get(runtime.HeaderAccept), v.route.Produces)) - } -} diff --git a/vendor/github.com/go-openapi/runtime/request.go b/vendor/github.com/go-openapi/runtime/request.go deleted file mode 100644 index 078fda173..000000000 --- a/vendor/github.com/go-openapi/runtime/request.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import ( - "bufio" - "io" - "net/http" - "strings" - - "github.com/go-openapi/swag" -) - -// CanHaveBody returns true if this method can have a body -func CanHaveBody(method string) bool { - mn := strings.ToUpper(method) - return mn == "POST" || mn == "PUT" || mn == "PATCH" || mn == "DELETE" -} - -// IsSafe returns true if this is a request with a safe method -func IsSafe(r *http.Request) bool { - mn := strings.ToUpper(r.Method) - return mn == "GET" || mn == "HEAD" -} - -// AllowsBody returns true if the request allows for a body -func AllowsBody(r *http.Request) bool { - mn := strings.ToUpper(r.Method) - return mn != "HEAD" -} - -// HasBody returns true if this method needs a content-type -func HasBody(r *http.Request) bool { - // happy case: we have a content length set - if r.ContentLength > 0 { - return true - } - - if r.Header.Get("content-length") != "" { - // in this case, no Transfer-Encoding should be present - // we have a header set but it was explicitly set to 0, so we assume no body - return false - } - - rdr := newPeekingReader(r.Body) - r.Body = rdr - return rdr.HasContent() -} - -func newPeekingReader(r io.ReadCloser) *peekingReader { - if r == nil { - return nil - } - return &peekingReader{ - underlying: bufio.NewReader(r), - orig: r, - } -} - -type peekingReader struct { - underlying interface { - Buffered() int - Peek(int) ([]byte, error) - Read([]byte) (int, error) - } - orig io.ReadCloser -} - -func (p *peekingReader) HasContent() bool { - if p == nil { - return false - } - if p.underlying.Buffered() > 0 { - return true - } - b, err := p.underlying.Peek(1) - if err != nil { - return false - } - return len(b) > 0 -} - -func (p *peekingReader) Read(d []byte) (int, error) { - if p == nil { - return 0, io.EOF - } - return p.underlying.Read(d) -} - -func (p *peekingReader) Close() error { - p.underlying = nil - if p.orig != nil { - return p.orig.Close() - } - return nil -} - -// JSONRequest creates a new http request with json headers set -func JSONRequest(method, urlStr string, body io.Reader) (*http.Request, error) { - req, err := http.NewRequest(method, urlStr, body) - if err != nil { - return nil, err - } - req.Header.Add(HeaderContentType, JSONMime) - req.Header.Add(HeaderAccept, JSONMime) - return req, nil -} - -// Gettable for things with a method GetOK(string) (data string, hasKey bool, hasValue bool) -type Gettable interface { - GetOK(string) 
([]string, bool, bool) -} - -// ReadSingleValue reads a single value from the source -func ReadSingleValue(values Gettable, name string) string { - vv, _, hv := values.GetOK(name) - if hv { - return vv[len(vv)-1] - } - return "" -} - -// ReadCollectionValue reads a collection value from a string data source -func ReadCollectionValue(values Gettable, name, collectionFormat string) []string { - v := ReadSingleValue(values, name) - return swag.SplitByFormat(v, collectionFormat) -} diff --git a/vendor/github.com/go-openapi/runtime/security/authenticator.go b/vendor/github.com/go-openapi/runtime/security/authenticator.go deleted file mode 100644 index c3ffdac7e..000000000 --- a/vendor/github.com/go-openapi/runtime/security/authenticator.go +++ /dev/null @@ -1,276 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package security - -import ( - "context" - "net/http" - "strings" - - "github.com/go-openapi/errors" - - "github.com/go-openapi/runtime" -) - -const ( - query = "query" - header = "header" -) - -// HttpAuthenticator is a function that authenticates a HTTP request -func HttpAuthenticator(handler func(*http.Request) (bool, interface{}, error)) runtime.Authenticator { - return runtime.AuthenticatorFunc(func(params interface{}) (bool, interface{}, error) { - if request, ok := params.(*http.Request); ok { - return handler(request) - } - if scoped, ok := params.(*ScopedAuthRequest); ok { - return handler(scoped.Request) - } - return false, nil, nil - }) -} - -// ScopedAuthenticator is a function that authenticates a HTTP request against a list of valid scopes -func ScopedAuthenticator(handler func(*ScopedAuthRequest) (bool, interface{}, error)) runtime.Authenticator { - return runtime.AuthenticatorFunc(func(params interface{}) (bool, interface{}, error) { - if request, ok := params.(*ScopedAuthRequest); ok { - return handler(request) - } - return false, nil, nil - }) -} - -// UserPassAuthentication authentication function -type UserPassAuthentication func(string, string) (interface{}, error) - -// UserPassAuthenticationCtx authentication function with context.Context -type UserPassAuthenticationCtx func(context.Context, string, string) (context.Context, interface{}, error) - -// TokenAuthentication authentication function -type TokenAuthentication func(string) (interface{}, error) - -// TokenAuthenticationCtx authentication function with context.Context -type TokenAuthenticationCtx func(context.Context, string) (context.Context, interface{}, error) - -// ScopedTokenAuthentication authentication function -type ScopedTokenAuthentication func(string, []string) (interface{}, error) - -// ScopedTokenAuthenticationCtx authentication function with context.Context -type ScopedTokenAuthenticationCtx func(context.Context, string, []string) (context.Context, interface{}, error) - -var DefaultRealmName = "API" - -type secCtxKey uint8 - -const ( - failedBasicAuth secCtxKey = iota - oauth2SchemeName -) - -func FailedBasicAuth(r *http.Request) 
string { - return FailedBasicAuthCtx(r.Context()) -} - -func FailedBasicAuthCtx(ctx context.Context) string { - v, ok := ctx.Value(failedBasicAuth).(string) - if !ok { - return "" - } - return v -} - -func OAuth2SchemeName(r *http.Request) string { - return OAuth2SchemeNameCtx(r.Context()) -} - -func OAuth2SchemeNameCtx(ctx context.Context) string { - v, ok := ctx.Value(oauth2SchemeName).(string) - if !ok { - return "" - } - return v -} - -// BasicAuth creates a basic auth authenticator with the provided authentication function -func BasicAuth(authenticate UserPassAuthentication) runtime.Authenticator { - return BasicAuthRealm(DefaultRealmName, authenticate) -} - -// BasicAuthRealm creates a basic auth authenticator with the provided authentication function and realm name -func BasicAuthRealm(realm string, authenticate UserPassAuthentication) runtime.Authenticator { - if realm == "" { - realm = DefaultRealmName - } - - return HttpAuthenticator(func(r *http.Request) (bool, interface{}, error) { - if usr, pass, ok := r.BasicAuth(); ok { - p, err := authenticate(usr, pass) - if err != nil { - *r = *r.WithContext(context.WithValue(r.Context(), failedBasicAuth, realm)) - } - return true, p, err - } - *r = *r.WithContext(context.WithValue(r.Context(), failedBasicAuth, realm)) - return false, nil, nil - }) -} - -// BasicAuthCtx creates a basic auth authenticator with the provided authentication function with support for context.Context -func BasicAuthCtx(authenticate UserPassAuthenticationCtx) runtime.Authenticator { - return BasicAuthRealmCtx(DefaultRealmName, authenticate) -} - -// BasicAuthRealmCtx creates a basic auth authenticator with the provided authentication function and realm name with support for context.Context -func BasicAuthRealmCtx(realm string, authenticate UserPassAuthenticationCtx) runtime.Authenticator { - if realm == "" { - realm = DefaultRealmName - } - - return HttpAuthenticator(func(r *http.Request) (bool, interface{}, error) { - if usr, pass, ok := r.BasicAuth(); ok { - ctx, p, err := authenticate(r.Context(), usr, pass) - if err != nil { - ctx = context.WithValue(ctx, failedBasicAuth, realm) - } - *r = *r.WithContext(ctx) - return true, p, err - } - *r = *r.WithContext(context.WithValue(r.Context(), failedBasicAuth, realm)) - return false, nil, nil - }) -} - -// APIKeyAuth creates an authenticator that uses a token for authorization. -// This token can be obtained from either a header or a query string -func APIKeyAuth(name, in string, authenticate TokenAuthentication) runtime.Authenticator { - inl := strings.ToLower(in) - if inl != query && inl != header { - // panic because this is most likely a typo - panic(errors.New(500, "api key auth: in value needs to be either \"query\" or \"header\".")) - } - - var getToken func(*http.Request) string - switch inl { - case header: - getToken = func(r *http.Request) string { return r.Header.Get(name) } - case query: - getToken = func(r *http.Request) string { return r.URL.Query().Get(name) } - } - - return HttpAuthenticator(func(r *http.Request) (bool, interface{}, error) { - token := getToken(r) - if token == "" { - return false, nil, nil - } - - p, err := authenticate(token) - return true, p, err - }) -} - -// APIKeyAuthCtx creates an authenticator that uses a token for authorization with support for context.Context. 
-// This token can be obtained from either a header or a query string -func APIKeyAuthCtx(name, in string, authenticate TokenAuthenticationCtx) runtime.Authenticator { - inl := strings.ToLower(in) - if inl != query && inl != header { - // panic because this is most likely a typo - panic(errors.New(500, "api key auth: in value needs to be either \"query\" or \"header\".")) - } - - var getToken func(*http.Request) string - switch inl { - case header: - getToken = func(r *http.Request) string { return r.Header.Get(name) } - case query: - getToken = func(r *http.Request) string { return r.URL.Query().Get(name) } - } - - return HttpAuthenticator(func(r *http.Request) (bool, interface{}, error) { - token := getToken(r) - if token == "" { - return false, nil, nil - } - - ctx, p, err := authenticate(r.Context(), token) - *r = *r.WithContext(ctx) - return true, p, err - }) -} - -// ScopedAuthRequest contains both a http request and the required scopes for a particular operation -type ScopedAuthRequest struct { - Request *http.Request - RequiredScopes []string -} - -// BearerAuth for use with oauth2 flows -func BearerAuth(name string, authenticate ScopedTokenAuthentication) runtime.Authenticator { - const prefix = "Bearer " - return ScopedAuthenticator(func(r *ScopedAuthRequest) (bool, interface{}, error) { - var token string - hdr := r.Request.Header.Get(runtime.HeaderAuthorization) - if strings.HasPrefix(hdr, prefix) { - token = strings.TrimPrefix(hdr, prefix) - } - if token == "" { - qs := r.Request.URL.Query() - token = qs.Get("access_token") - } - //#nosec - ct, _, _ := runtime.ContentType(r.Request.Header) - if token == "" && (ct == "application/x-www-form-urlencoded" || ct == "multipart/form-data") { - token = r.Request.FormValue("access_token") - } - - if token == "" { - return false, nil, nil - } - - rctx := context.WithValue(r.Request.Context(), oauth2SchemeName, name) - *r.Request = *r.Request.WithContext(rctx) - p, err := authenticate(token, r.RequiredScopes) - return true, p, err - }) -} - -// BearerAuthCtx for use with oauth2 flows with support for context.Context. -func BearerAuthCtx(name string, authenticate ScopedTokenAuthenticationCtx) runtime.Authenticator { - const prefix = "Bearer " - return ScopedAuthenticator(func(r *ScopedAuthRequest) (bool, interface{}, error) { - var token string - hdr := r.Request.Header.Get(runtime.HeaderAuthorization) - if strings.HasPrefix(hdr, prefix) { - token = strings.TrimPrefix(hdr, prefix) - } - if token == "" { - qs := r.Request.URL.Query() - token = qs.Get("access_token") - } - //#nosec - ct, _, _ := runtime.ContentType(r.Request.Header) - if token == "" && (ct == "application/x-www-form-urlencoded" || ct == "multipart/form-data") { - token = r.Request.FormValue("access_token") - } - - if token == "" { - return false, nil, nil - } - - rctx := context.WithValue(r.Request.Context(), oauth2SchemeName, name) - ctx, p, err := authenticate(rctx, token, r.RequiredScopes) - *r.Request = *r.Request.WithContext(ctx) - return true, p, err - }) -} diff --git a/vendor/github.com/go-openapi/runtime/security/authorizer.go b/vendor/github.com/go-openapi/runtime/security/authorizer.go deleted file mode 100644 index 00c1a4d6a..000000000 --- a/vendor/github.com/go-openapi/runtime/security/authorizer.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
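The security package deleted here turns plain callbacks into runtime.Authenticator values. A short sketch of the two most common constructors; the header name and token check are invented for illustration:

```go
package main

import (
	"errors"

	"github.com/go-openapi/runtime/security"
)

func main() {
	// API key auth: the token is read from the X-API-Key header
	// ("query" instead of "header" would read a query parameter).
	apiKeyAuth := security.APIKeyAuth("X-API-Key", "header", func(token string) (interface{}, error) {
		if token != "secret" { // stand-in check
			return nil, errors.New("invalid API key")
		}
		return "service-account", nil // the authenticated principal
	})

	// Bearer auth for OAuth2 schemes: the callback also receives the scopes
	// required by the matched operation.
	bearerAuth := security.BearerAuth("oauth2", func(token string, requiredScopes []string) (interface{}, error) {
		// validate the token and check requiredScopes here
		return "user", nil
	})

	// Both values implement runtime.Authenticator and would be registered
	// under the security scheme names declared in the spec, for example
	// api.RegisterAuth("apiKey", apiKeyAuth) on the untyped API shown earlier.
	_ = apiKeyAuth
	_ = bearerAuth
}
```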
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package security - -import ( - "net/http" - - "github.com/go-openapi/runtime" -) - -// Authorized provides a default implementation of the Authorizer interface where all -// requests are authorized (successful) -func Authorized() runtime.Authorizer { - return runtime.AuthorizerFunc(func(_ *http.Request, _ interface{}) error { return nil }) -} diff --git a/vendor/github.com/go-openapi/runtime/statuses.go b/vendor/github.com/go-openapi/runtime/statuses.go deleted file mode 100644 index 3b011a0bf..000000000 --- a/vendor/github.com/go-openapi/runtime/statuses.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -// Statuses lists the most common HTTP status codes to default message -// taken from https://httpstatuses.com/ -var Statuses = map[int]string{ - 100: "Continue", - 101: "Switching Protocols", - 102: "Processing", - 103: "Checkpoint", - 122: "URI too long", - 200: "OK", - 201: "Created", - 202: "Accepted", - 203: "Request Processed", - 204: "No Content", - 205: "Reset Content", - 206: "Partial Content", - 207: "Multi-Status", - 208: "Already Reported", - 226: "IM Used", - 300: "Multiple Choices", - 301: "Moved Permanently", - 302: "Found", - 303: "See Other", - 304: "Not Modified", - 305: "Use Proxy", - 306: "Switch Proxy", - 307: "Temporary Redirect", - 308: "Permanent Redirect", - 400: "Bad Request", - 401: "Unauthorized", - 402: "Payment Required", - 403: "Forbidden", - 404: "Not Found", - 405: "Method Not Allowed", - 406: "Not Acceptable", - 407: "Proxy Authentication Required", - 408: "Request Timeout", - 409: "Conflict", - 410: "Gone", - 411: "Length Required", - 412: "Precondition Failed", - 413: "Request Entity Too Large", - 414: "Request-URI Too Long", - 415: "Unsupported Media Type", - 416: "Request Range Not Satisfiable", - 417: "Expectation Failed", - 418: "I'm a teapot", - 420: "Enhance Your Calm", - 422: "Unprocessable Entity", - 423: "Locked", - 424: "Failed Dependency", - 426: "Upgrade Required", - 428: "Precondition Required", - 429: "Too Many Requests", - 431: "Request Header Fields Too Large", - 444: "No Response", - 449: "Retry With", - 450: "Blocked by Windows Parental Controls", - 451: "Wrong Exchange Server", - 499: "Client Closed Request", - 500: "Internal Server Error", - 501: "Not Implemented", - 502: "Bad Gateway", - 503: "Service Unavailable", - 504: "Gateway Timeout", - 505: "HTTP Version Not Supported", - 506: "Variant Also Negotiates", - 507: "Insufficient Storage", - 508: "Loop Detected", - 509: "Bandwidth Limit Exceeded", - 510: "Not 
Extended", - 511: "Network Authentication Required", - 598: "Network read timeout error", - 599: "Network connect timeout error", -} diff --git a/vendor/github.com/go-openapi/runtime/text.go b/vendor/github.com/go-openapi/runtime/text.go deleted file mode 100644 index f33320b7d..000000000 --- a/vendor/github.com/go-openapi/runtime/text.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import ( - "bytes" - "encoding" - "errors" - "fmt" - "io" - "reflect" - - "github.com/go-openapi/swag" -) - -// TextConsumer creates a new text consumer -func TextConsumer() Consumer { - return ConsumerFunc(func(reader io.Reader, data interface{}) error { - if reader == nil { - return errors.New("TextConsumer requires a reader") // early exit - } - - buf := new(bytes.Buffer) - _, err := buf.ReadFrom(reader) - if err != nil { - return err - } - b := buf.Bytes() - - // If the buffer is empty, no need to unmarshal it, which causes a panic. - if len(b) == 0 { - return nil - } - - if tu, ok := data.(encoding.TextUnmarshaler); ok { - err := tu.UnmarshalText(b) - if err != nil { - return fmt.Errorf("text consumer: %v", err) - } - - return nil - } - - t := reflect.TypeOf(data) - if data != nil && t.Kind() == reflect.Ptr { - v := reflect.Indirect(reflect.ValueOf(data)) - if t.Elem().Kind() == reflect.String { - v.SetString(string(b)) - return nil - } - } - - return fmt.Errorf("%v (%T) is not supported by the TextConsumer, %s", - data, data, "can be resolved by supporting TextUnmarshaler interface") - }) -} - -// TextProducer creates a new text producer -func TextProducer() Producer { - return ProducerFunc(func(writer io.Writer, data interface{}) error { - if writer == nil { - return errors.New("TextProducer requires a writer") // early exit - } - - if data == nil { - return errors.New("no data given to produce text from") - } - - if tm, ok := data.(encoding.TextMarshaler); ok { - txt, err := tm.MarshalText() - if err != nil { - return fmt.Errorf("text producer: %v", err) - } - _, err = writer.Write(txt) - return err - } - - if str, ok := data.(error); ok { - _, err := writer.Write([]byte(str.Error())) - return err - } - - if str, ok := data.(fmt.Stringer); ok { - _, err := writer.Write([]byte(str.String())) - return err - } - - v := reflect.Indirect(reflect.ValueOf(data)) - if t := v.Type(); t.Kind() == reflect.Struct || t.Kind() == reflect.Slice { - b, err := swag.WriteJSON(data) - if err != nil { - return err - } - _, err = writer.Write(b) - return err - } - if v.Kind() != reflect.String { - return fmt.Errorf("%T is not a supported type by the TextProducer", data) - } - - _, err := writer.Write([]byte(v.String())) - return err - }) -} diff --git a/vendor/github.com/go-openapi/runtime/values.go b/vendor/github.com/go-openapi/runtime/values.go deleted file mode 100644 index 11f5732af..000000000 --- a/vendor/github.com/go-openapi/runtime/values.go +++ /dev/null @@ -1,19 +0,0 @@ -package runtime - -// Values 
typically represent parameters on a http request. -type Values map[string][]string - -// GetOK returns the values collection for the given key. -// When the key is present in the map it will return true for hasKey. -// When the value is not empty it will return true for hasValue. -func (v Values) GetOK(key string) (value []string, hasKey bool, hasValue bool) { - value, hasKey = v[key] - if !hasKey { - return - } - if len(value) == 0 { - return - } - hasValue = true - return -} diff --git a/vendor/github.com/go-openapi/runtime/xml.go b/vendor/github.com/go-openapi/runtime/xml.go deleted file mode 100644 index 821c7393d..000000000 --- a/vendor/github.com/go-openapi/runtime/xml.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package runtime - -import ( - "encoding/xml" - "io" -) - -// XMLConsumer creates a new XML consumer -func XMLConsumer() Consumer { - return ConsumerFunc(func(reader io.Reader, data interface{}) error { - dec := xml.NewDecoder(reader) - return dec.Decode(data) - }) -} - -// XMLProducer creates a new XML producer -func XMLProducer() Producer { - return ProducerFunc(func(writer io.Writer, data interface{}) error { - enc := xml.NewEncoder(writer) - return enc.Encode(data) - }) -} diff --git a/vendor/github.com/go-openapi/runtime/yamlpc/yaml.go b/vendor/github.com/go-openapi/runtime/yamlpc/yaml.go deleted file mode 100644 index b30d37712..000000000 --- a/vendor/github.com/go-openapi/runtime/yamlpc/yaml.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
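All of the codec files being dropped implement the same two small interfaces, runtime.Consumer (Consume(io.Reader, interface{}) error) and runtime.Producer (Produce(io.Writer, interface{}) error). A round-trip sketch using the text and XML pairs:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/go-openapi/runtime"
)

func main() {
	var buf bytes.Buffer

	// Text round-trip into a plain string.
	if err := runtime.TextProducer().Produce(&buf, "hello"); err != nil {
		log.Fatal(err)
	}
	var s string
	if err := runtime.TextConsumer().Consume(&buf, &s); err != nil {
		log.Fatal(err)
	}
	fmt.Println(s) // hello

	// XML round-trip through a tagged struct.
	type note struct {
		Msg string `xml:"msg"`
	}
	buf.Reset()
	if err := runtime.XMLProducer().Produce(&buf, note{Msg: "hi"}); err != nil {
		log.Fatal(err)
	}
	var n note
	if err := runtime.XMLConsumer().Consume(&buf, &n); err != nil {
		log.Fatal(err)
	}
	fmt.Println(n.Msg) // hi
}
```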
- -package yamlpc - -import ( - "io" - - "github.com/go-openapi/runtime" - - "gopkg.in/yaml.v2" -) - -// YAMLConsumer creates a consumer for yaml data -func YAMLConsumer() runtime.Consumer { - return runtime.ConsumerFunc(func(r io.Reader, v interface{}) error { - dec := yaml.NewDecoder(r) - return dec.Decode(v) - }) -} - -// YAMLProducer creates a producer for yaml data -func YAMLProducer() runtime.Producer { - return runtime.ProducerFunc(func(w io.Writer, v interface{}) error { - enc := yaml.NewEncoder(w) - defer enc.Close() - return enc.Encode(v) - }) -} diff --git a/vendor/github.com/go-openapi/spec/info.go b/vendor/github.com/go-openapi/spec/info.go index c458b49b2..582f0fd4c 100644 --- a/vendor/github.com/go-openapi/spec/info.go +++ b/vendor/github.com/go-openapi/spec/info.go @@ -16,6 +16,7 @@ package spec import ( "encoding/json" + "strconv" "strings" "github.com/go-openapi/jsonpointer" @@ -40,6 +41,24 @@ func (e Extensions) GetString(key string) (string, bool) { return "", false } +// GetInt gets a int value from the extensions +func (e Extensions) GetInt(key string) (int, bool) { + realKey := strings.ToLower(key) + + if v, ok := e.GetString(realKey); ok { + if r, err := strconv.Atoi(v); err == nil { + return r, true + } + } + + if v, ok := e[realKey]; ok { + if r, rOk := v.(float64); rOk { + return int(r), true + } + } + return -1, false +} + // GetBool gets a string value from the extensions func (e Extensions) GetBool(key string) (bool, bool) { if v, ok := e[strings.ToLower(key)]; ok { diff --git a/vendor/github.com/go-openapi/spec/properties.go b/vendor/github.com/go-openapi/spec/properties.go index 2af13787a..91d2435f0 100644 --- a/vendor/github.com/go-openapi/spec/properties.go +++ b/vendor/github.com/go-openapi/spec/properties.go @@ -42,8 +42,8 @@ func (items OrderSchemaItems) MarshalJSON() ([]byte, error) { func (items OrderSchemaItems) Len() int { return len(items) } func (items OrderSchemaItems) Swap(i, j int) { items[i], items[j] = items[j], items[i] } func (items OrderSchemaItems) Less(i, j int) (ret bool) { - ii, oki := items[i].Extensions.GetString("x-order") - ij, okj := items[j].Extensions.GetString("x-order") + ii, oki := items[i].Extensions.GetInt("x-order") + ij, okj := items[j].Extensions.GetInt("x-order") if oki { if okj { defer func() { @@ -56,7 +56,7 @@ func (items OrderSchemaItems) Less(i, j int) (ret bool) { ret = reflect.ValueOf(ii).String() < reflect.ValueOf(ij).String() } }() - return reflect.ValueOf(ii).Int() < reflect.ValueOf(ij).Int() + return ii < ij } return true } else if okj { diff --git a/vendor/github.com/go-openapi/spec/responses.go b/vendor/github.com/go-openapi/spec/responses.go index 4efb6f868..16c3076fe 100644 --- a/vendor/github.com/go-openapi/spec/responses.go +++ b/vendor/github.com/go-openapi/spec/responses.go @@ -19,6 +19,7 @@ import ( "fmt" "reflect" "strconv" + "strings" "github.com/go-openapi/swag" ) @@ -62,6 +63,7 @@ func (r *Responses) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &r.ResponsesProps); err != nil { return err } + if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { return err } @@ -107,20 +109,31 @@ func (r ResponsesProps) MarshalJSON() ([]byte, error) { // UnmarshalJSON unmarshals responses from JSON func (r *ResponsesProps) UnmarshalJSON(data []byte) error { - var res map[string]Response + var res map[string]json.RawMessage if err := json.Unmarshal(data, &res); err != nil { - return nil + return err } + if v, ok := res["default"]; ok { - r.Default = &v + var defaultRes Response + if 
err := json.Unmarshal(v, &defaultRes); err != nil { + return err + } + r.Default = &defaultRes delete(res, "default") } for k, v := range res { - if nk, err := strconv.Atoi(k); err == nil { - if r.StatusCodeResponses == nil { - r.StatusCodeResponses = map[int]Response{} + if !strings.HasPrefix(k, "x-") { + var statusCodeResp Response + if err := json.Unmarshal(v, &statusCodeResp); err != nil { + return err + } + if nk, err := strconv.Atoi(k); err == nil { + if r.StatusCodeResponses == nil { + r.StatusCodeResponses = map[int]Response{} + } + r.StatusCodeResponses[nk] = statusCodeResp } - r.StatusCodeResponses[nk] = v } } return nil diff --git a/vendor/github.com/go-openapi/strfmt/bson.go b/vendor/github.com/go-openapi/strfmt/bson.go index 8740b1505..a8a3604a2 100644 --- a/vendor/github.com/go-openapi/strfmt/bson.go +++ b/vendor/github.com/go-openapi/strfmt/bson.go @@ -142,7 +142,7 @@ func (id ObjectId) MarshalBSONValue() (bsontype.Type, []byte, error) { // BSON value representation of themselves. The BSON bytes and type can be // assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it // wishes to retain the data after returning. -func (id *ObjectId) UnmarshalBSONValue(tpe bsontype.Type, data []byte) error { +func (id *ObjectId) UnmarshalBSONValue(_ bsontype.Type, data []byte) error { var oid bsonprim.ObjectID copy(oid[:], data) *id = ObjectId(oid) diff --git a/vendor/github.com/go-openapi/strfmt/time.go b/vendor/github.com/go-openapi/strfmt/time.go index 6f5a44bb7..9bef4c3b3 100644 --- a/vendor/github.com/go-openapi/strfmt/time.go +++ b/vendor/github.com/go-openapi/strfmt/time.go @@ -133,13 +133,19 @@ func (t DateTime) String() string { } // IsZero returns whether the date time is a zero value -func (t DateTime) IsZero() bool { - return time.Time(t).IsZero() +func (t *DateTime) IsZero() bool { + if t == nil { + return true + } + return time.Time(*t).IsZero() } // IsUnixZerom returns whether the date time is equivalent to time.Unix(0, 0).UTC(). -func (t DateTime) IsUnixZero() bool { - return time.Time(t) == UnixZero +func (t *DateTime) IsUnixZero() bool { + if t == nil { + return true + } + return time.Time(*t).Equal(UnixZero) } // MarshalText implements the text marshaller interface diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE b/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE deleted file mode 100644 index 8dada3eda..000000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE b/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE deleted file mode 100644 index 5d8cb5b72..000000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE +++ /dev/null @@ -1 +0,0 @@ -Copyright 2012 Matt T. 
Proud (matt.proud@gmail.com) diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore deleted file mode 100644 index e16fb946b..000000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore +++ /dev/null @@ -1 +0,0 @@ -cover.dat diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile deleted file mode 100644 index 81be21437..000000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -all: - -cover: - go test -cover -v -coverprofile=cover.dat ./... - go tool cover -func cover.dat - -.PHONY: cover diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go deleted file mode 100644 index 258c0636a..000000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2013 Matt T. Proud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pbutil - -import ( - "encoding/binary" - "errors" - "io" - - "github.com/golang/protobuf/proto" -) - -var errInvalidVarint = errors.New("invalid varint32 encountered") - -// ReadDelimited decodes a message from the provided length-delimited stream, -// where the length is encoded as 32-bit varint prefix to the message body. -// It returns the total number of bytes read and any applicable error. This is -// roughly equivalent to the companion Java API's -// MessageLite#parseDelimitedFrom. As per the reader contract, this function -// calls r.Read repeatedly as required until exactly one message including its -// prefix is read and decoded (or an error has occurred). The function never -// reads more bytes from the stream than required. The function never returns -// an error if a message has been read and decoded correctly, even if the end -// of the stream has been reached in doing so. In that case, any subsequent -// calls return (0, io.EOF). -func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) { - // Per AbstractParser#parsePartialDelimitedFrom with - // CodedInputStream#readRawVarint32. - var headerBuf [binary.MaxVarintLen32]byte - var bytesRead, varIntBytes int - var messageLength uint64 - for varIntBytes == 0 { // i.e. no varint has been decoded yet. - if bytesRead >= len(headerBuf) { - return bytesRead, errInvalidVarint - } - // We have to read byte by byte here to avoid reading more bytes - // than required. Each read byte is appended to what we have - // read before. - newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1]) - if newBytesRead == 0 { - if err != nil { - return bytesRead, err - } - // A Reader should not return (0, nil), but if it does, - // it should be treated as no-op (according to the - // Reader contract). So let's go on... 
- continue - } - bytesRead += newBytesRead - // Now present everything read so far to the varint decoder and - // see if a varint can be decoded already. - messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead]) - } - - messageBuf := make([]byte, messageLength) - newBytesRead, err := io.ReadFull(r, messageBuf) - bytesRead += newBytesRead - if err != nil { - return bytesRead, err - } - - return bytesRead, proto.Unmarshal(messageBuf, m) -} diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go deleted file mode 100644 index c318385cb..000000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2013 Matt T. Proud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package pbutil provides record length-delimited Protocol Buffer streaming. -package pbutil diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go deleted file mode 100644 index 8fb59ad22..000000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2013 Matt T. Proud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pbutil - -import ( - "encoding/binary" - "io" - - "github.com/golang/protobuf/proto" -) - -// WriteDelimited encodes and dumps a message to the provided writer prefixed -// with a 32-bit varint indicating the length of the encoded message, producing -// a length-delimited record stream, which can be used to chain together -// encoded messages of the same type together in a file. It returns the total -// number of bytes written and any applicable error. This is roughly -// equivalent to the companion Java API's MessageLite#writeDelimitedTo. 
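ReadDelimited and WriteDelimited frame each message with a varint length prefix, so several records of the same type can share one stream. A hedged round-trip sketch, assuming a protobuf toolchain recent enough that the well-known wrapper types still satisfy the legacy proto.Message interface pbutil expects:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/matttproud/golang_protobuf_extensions/pbutil"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	var buf bytes.Buffer

	// Write two length-prefixed messages back to back.
	for _, s := range []string{"first", "second"} {
		if _, err := pbutil.WriteDelimited(&buf, wrapperspb.String(s)); err != nil {
			log.Fatal(err)
		}
	}

	// Read them back until the stream is exhausted (io.EOF).
	for {
		msg := &wrapperspb.StringValue{}
		if _, err := pbutil.ReadDelimited(&buf, msg); err != nil {
			break
		}
		fmt.Println(msg.GetValue()) // first, then second
	}
}
```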
-func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) { - buffer, err := proto.Marshal(m) - if err != nil { - return 0, err - } - - var buf [binary.MaxVarintLen32]byte - encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer))) - - sync, err := w.Write(buf[:encodedLength]) - if err != nil { - return sync, err - } - - n, err = w.Write(buffer) - return n + sync, err -} diff --git a/vendor/github.com/opentracing/opentracing-go/.gitignore b/vendor/github.com/opentracing/opentracing-go/.gitignore deleted file mode 100644 index c57100a59..000000000 --- a/vendor/github.com/opentracing/opentracing-go/.gitignore +++ /dev/null @@ -1 +0,0 @@ -coverage.txt diff --git a/vendor/github.com/opentracing/opentracing-go/.golangci.yml b/vendor/github.com/opentracing/opentracing-go/.golangci.yml deleted file mode 100644 index 011a79406..000000000 --- a/vendor/github.com/opentracing/opentracing-go/.golangci.yml +++ /dev/null @@ -1,3 +0,0 @@ -linters-settings: - staticcheck: - checks: [ "all", "-SA1019" ] diff --git a/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md b/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md deleted file mode 100644 index d3bfcf623..000000000 --- a/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md +++ /dev/null @@ -1,63 +0,0 @@ -Changes by Version -================== - - -1.2.0 (2020-07-01) -------------------- - -* Restore the ability to reset the current span in context to nil (#231) -- Yuri Shkuro -* Use error.object per OpenTracing Semantic Conventions (#179) -- Rahman Syed -* Convert nil pointer log field value to string "nil" (#230) -- Cyril Tovena -* Add Go module support (#215) -- Zaba505 -* Make SetTag helper types in ext public (#229) -- Blake Edwards -* Add log/fields helpers for keys from specification (#226) -- Dmitry Monakhov -* Improve noop impementation (#223) -- chanxuehong -* Add an extension to Tracer interface for custom go context creation (#220) -- Krzesimir Nowak -* Fix typo in comments (#222) -- meteorlxy -* Improve documentation for log.Object() to emphasize the requirement to pass immutable arguments (#219) -- 疯狂的小企鹅 -* [mock] Return ErrInvalidSpanContext if span context is not MockSpanContext (#216) -- Milad Irannejad - - -1.1.0 (2019-03-23) -------------------- - -Notable changes: -- The library is now released under Apache 2.0 license -- Use Set() instead of Add() in HTTPHeadersCarrier is functionally a breaking change (fixes issue [#159](https://github.com/opentracing/opentracing-go/issues/159)) -- 'golang.org/x/net/context' is replaced with 'context' from the standard library - -List of all changes: - -- Export StartSpanFromContextWithTracer (#214) -- Add IsGlobalTracerRegistered() to indicate if a tracer has been registered (#201) -- Use Set() instead of Add() in HTTPHeadersCarrier (#191) -- Update license to Apache 2.0 (#181) -- Replace 'golang.org/x/net/context' with 'context' (#176) -- Port of Python opentracing/harness/api_check.py to Go (#146) -- Fix race condition in MockSpan.Context() (#170) -- Add PeerHostIPv4.SetString() (#155) -- Add a Noop log field type to log to allow for optional fields (#150) - - -1.0.2 (2017-04-26) -------------------- - -- Add more semantic tags (#139) - - -1.0.1 (2017-02-06) -------------------- - -- Correct spelling in comments -- Address race in nextMockID() (#123) -- log: avoid panic marshaling nil error (#131) -- Deprecate InitGlobalTracer in favor of SetGlobalTracer (#128) -- Drop Go 1.5 that fails in Travis (#129) -- Add convenience methods Key() and Value() to 
log.Field -- Add convenience methods to log.Field (2 years, 6 months ago) - -1.0.0 (2016-09-26) -------------------- - -- This release implements OpenTracing Specification 1.0 (https://opentracing.io/spec) - diff --git a/vendor/github.com/opentracing/opentracing-go/LICENSE b/vendor/github.com/opentracing/opentracing-go/LICENSE deleted file mode 100644 index f0027349e..000000000 --- a/vendor/github.com/opentracing/opentracing-go/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. 
We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2016 The OpenTracing Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/opentracing/opentracing-go/Makefile b/vendor/github.com/opentracing/opentracing-go/Makefile deleted file mode 100644 index 62abb63f5..000000000 --- a/vendor/github.com/opentracing/opentracing-go/Makefile +++ /dev/null @@ -1,20 +0,0 @@ -.DEFAULT_GOAL := test-and-lint - -.PHONY: test-and-lint -test-and-lint: test lint - -.PHONY: test -test: - go test -v -cover -race ./... - -.PHONY: cover -cover: - go test -v -coverprofile=coverage.txt -covermode=atomic -race ./... - -.PHONY: lint -lint: - go fmt ./... - golint ./... - @# Run again with magic to exit non-zero if golint outputs anything. - @! (golint ./... | read dummy) - go vet ./... diff --git a/vendor/github.com/opentracing/opentracing-go/README.md b/vendor/github.com/opentracing/opentracing-go/README.md deleted file mode 100644 index 6ef1d7c9d..000000000 --- a/vendor/github.com/opentracing/opentracing-go/README.md +++ /dev/null @@ -1,171 +0,0 @@ -[![Gitter chat](http://img.shields.io/badge/gitter-join%20chat%20%E2%86%92-brightgreen.svg)](https://gitter.im/opentracing/public) [![Build Status](https://travis-ci.org/opentracing/opentracing-go.svg?branch=master)](https://travis-ci.org/opentracing/opentracing-go) [![GoDoc](https://godoc.org/github.com/opentracing/opentracing-go?status.svg)](http://godoc.org/github.com/opentracing/opentracing-go) -[![Sourcegraph Badge](https://sourcegraph.com/github.com/opentracing/opentracing-go/-/badge.svg)](https://sourcegraph.com/github.com/opentracing/opentracing-go?badge) - -# OpenTracing API for Go - -This package is a Go platform API for OpenTracing. - -## Required Reading - -In order to understand the Go platform API, one must first be familiar with the -[OpenTracing project](https://opentracing.io) and -[terminology](https://opentracing.io/specification/) more specifically. - -## API overview for those adding instrumentation - -Everyday consumers of this `opentracing` package really only need to worry -about a couple of key abstractions: the `StartSpan` function, the `Span` -interface, and binding a `Tracer` at `main()`-time. Here are code snippets -demonstrating some important use cases. - -#### Singleton initialization - -The simplest starting point is `./default_tracer.go`. As early as possible, call - -```go - import "github.com/opentracing/opentracing-go" - import ".../some_tracing_impl" - - func main() { - opentracing.SetGlobalTracer( - // tracing impl specific: - some_tracing_impl.New(...), - ) - ... - } -``` - -#### Non-Singleton initialization - -If you prefer direct control to singletons, manage ownership of the -`opentracing.Tracer` implementation explicitly. 
- -#### Creating a Span given an existing Go `context.Context` - -If you use `context.Context` in your application, OpenTracing's Go library will -happily rely on it for `Span` propagation. To start a new (blocking child) -`Span`, you can use `StartSpanFromContext`. - -```go - func xyz(ctx context.Context, ...) { - ... - span, ctx := opentracing.StartSpanFromContext(ctx, "operation_name") - defer span.Finish() - span.LogFields( - log.String("event", "soft error"), - log.String("type", "cache timeout"), - log.Int("waited.millis", 1500)) - ... - } -``` - -#### Starting an empty trace by creating a "root span" - -It's always possible to create a "root" `Span` with no parent or other causal -reference. - -```go - func xyz() { - ... - sp := opentracing.StartSpan("operation_name") - defer sp.Finish() - ... - } -``` - -#### Creating a (child) Span given an existing (parent) Span - -```go - func xyz(parentSpan opentracing.Span, ...) { - ... - sp := opentracing.StartSpan( - "operation_name", - opentracing.ChildOf(parentSpan.Context())) - defer sp.Finish() - ... - } -``` - -#### Serializing to the wire - -```go - func makeSomeRequest(ctx context.Context) ... { - if span := opentracing.SpanFromContext(ctx); span != nil { - httpClient := &http.Client{} - httpReq, _ := http.NewRequest("GET", "http://myservice/", nil) - - // Transmit the span's TraceContext as HTTP headers on our - // outbound request. - opentracing.GlobalTracer().Inject( - span.Context(), - opentracing.HTTPHeaders, - opentracing.HTTPHeadersCarrier(httpReq.Header)) - - resp, err := httpClient.Do(httpReq) - ... - } - ... - } -``` - -#### Deserializing from the wire - -```go - http.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { - var serverSpan opentracing.Span - appSpecificOperationName := ... - wireContext, err := opentracing.GlobalTracer().Extract( - opentracing.HTTPHeaders, - opentracing.HTTPHeadersCarrier(req.Header)) - if err != nil { - // Optionally record something about err here - } - - // Create the span referring to the RPC client if available. - // If wireContext == nil, a root span will be created. - serverSpan = opentracing.StartSpan( - appSpecificOperationName, - ext.RPCServerOption(wireContext)) - - defer serverSpan.Finish() - - ctx := opentracing.ContextWithSpan(context.Background(), serverSpan) - ... - } -``` - -#### Conditionally capture a field using `log.Noop` - -In some situations, you may want to dynamically decide whether or not -to log a field. For example, you may want to capture additional data, -such as a customer ID, in non-production environments: - -```go - func Customer(order *Order) log.Field { - if os.Getenv("ENVIRONMENT") == "dev" { - return log.String("customer", order.Customer.ID) - } - return log.Noop() - } -``` - -#### Goroutine-safety - -The entire public API is goroutine-safe and does not require external -synchronization. - -## API pointers for those implementing a tracing system - -Tracing system implementors may be able to reuse or copy-paste-modify the `basictracer` package, found [here](https://github.com/opentracing/basictracer-go). In particular, see `basictracer.New(...)`. - -## API compatibility - -For the time being, "mild" backwards-incompatible changes may be made without changing the major version number. As OpenTracing and `opentracing-go` mature, backwards compatibility will become more of a priority. 
- -## Tracer test suite - -A test suite is available in the [harness](https://godoc.org/github.com/opentracing/opentracing-go/harness) package that can assist Tracer implementors to assert that their Tracer is working correctly. - -## Licensing - -[Apache 2.0 License](./LICENSE). diff --git a/vendor/github.com/opentracing/opentracing-go/ext.go b/vendor/github.com/opentracing/opentracing-go/ext.go deleted file mode 100644 index e11977ebe..000000000 --- a/vendor/github.com/opentracing/opentracing-go/ext.go +++ /dev/null @@ -1,24 +0,0 @@ -package opentracing - -import ( - "context" -) - -// TracerContextWithSpanExtension is an extension interface that the -// implementation of the Tracer interface may want to implement. It -// allows to have some control over the go context when the -// ContextWithSpan is invoked. -// -// The primary purpose of this extension are adapters from opentracing -// API to some other tracing API. -type TracerContextWithSpanExtension interface { - // ContextWithSpanHook gets called by the ContextWithSpan - // function, when the Tracer implementation also implements - // this interface. It allows to put extra information into the - // context and make it available to the callers of the - // ContextWithSpan. - // - // This hook is invoked before the ContextWithSpan function - // actually puts the span into the context. - ContextWithSpanHook(ctx context.Context, span Span) context.Context -} diff --git a/vendor/github.com/opentracing/opentracing-go/ext/field.go b/vendor/github.com/opentracing/opentracing-go/ext/field.go deleted file mode 100644 index 8282bd758..000000000 --- a/vendor/github.com/opentracing/opentracing-go/ext/field.go +++ /dev/null @@ -1,17 +0,0 @@ -package ext - -import ( - "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/log" -) - -// LogError sets the error=true tag on the Span and logs err as an "error" event. -func LogError(span opentracing.Span, err error, fields ...log.Field) { - Error.Set(span, true) - ef := []log.Field{ - log.Event("error"), - log.Error(err), - } - ef = append(ef, fields...) - span.LogFields(ef...) -} diff --git a/vendor/github.com/opentracing/opentracing-go/ext/tags.go b/vendor/github.com/opentracing/opentracing-go/ext/tags.go deleted file mode 100644 index a414b5951..000000000 --- a/vendor/github.com/opentracing/opentracing-go/ext/tags.go +++ /dev/null @@ -1,215 +0,0 @@ -package ext - -import "github.com/opentracing/opentracing-go" - -// These constants define common tag names recommended for better portability across -// tracing systems and languages/platforms. -// -// The tag names are defined as typed strings, so that in addition to the usual use -// -// span.setTag(TagName, value) -// -// they also support value type validation via this additional syntax: -// -// TagName.Set(span, value) -// -var ( - ////////////////////////////////////////////////////////////////////// - // SpanKind (client/server or producer/consumer) - ////////////////////////////////////////////////////////////////////// - - // SpanKind hints at relationship between spans, e.g. 
client/server - SpanKind = spanKindTagName("span.kind") - - // SpanKindRPCClient marks a span representing the client-side of an RPC - // or other remote call - SpanKindRPCClientEnum = SpanKindEnum("client") - SpanKindRPCClient = opentracing.Tag{Key: string(SpanKind), Value: SpanKindRPCClientEnum} - - // SpanKindRPCServer marks a span representing the server-side of an RPC - // or other remote call - SpanKindRPCServerEnum = SpanKindEnum("server") - SpanKindRPCServer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindRPCServerEnum} - - // SpanKindProducer marks a span representing the producer-side of a - // message bus - SpanKindProducerEnum = SpanKindEnum("producer") - SpanKindProducer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindProducerEnum} - - // SpanKindConsumer marks a span representing the consumer-side of a - // message bus - SpanKindConsumerEnum = SpanKindEnum("consumer") - SpanKindConsumer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindConsumerEnum} - - ////////////////////////////////////////////////////////////////////// - // Component name - ////////////////////////////////////////////////////////////////////// - - // Component is a low-cardinality identifier of the module, library, - // or package that is generating a span. - Component = StringTagName("component") - - ////////////////////////////////////////////////////////////////////// - // Sampling hint - ////////////////////////////////////////////////////////////////////// - - // SamplingPriority determines the priority of sampling this Span. - SamplingPriority = Uint16TagName("sampling.priority") - - ////////////////////////////////////////////////////////////////////// - // Peer tags. These tags can be emitted by either client-side or - // server-side to describe the other side/service in a peer-to-peer - // communications, like an RPC call. - ////////////////////////////////////////////////////////////////////// - - // PeerService records the service name of the peer. - PeerService = StringTagName("peer.service") - - // PeerAddress records the address name of the peer. This may be a "ip:port", - // a bare "hostname", a FQDN or even a database DSN substring - // like "mysql://username@127.0.0.1:3306/dbname" - PeerAddress = StringTagName("peer.address") - - // PeerHostname records the host name of the peer - PeerHostname = StringTagName("peer.hostname") - - // PeerHostIPv4 records IP v4 host address of the peer - PeerHostIPv4 = IPv4TagName("peer.ipv4") - - // PeerHostIPv6 records IP v6 host address of the peer - PeerHostIPv6 = StringTagName("peer.ipv6") - - // PeerPort records port number of the peer - PeerPort = Uint16TagName("peer.port") - - ////////////////////////////////////////////////////////////////////// - // HTTP Tags - ////////////////////////////////////////////////////////////////////// - - // HTTPUrl should be the URL of the request being handled in this segment - // of the trace, in standard URI format. The protocol is optional. - HTTPUrl = StringTagName("http.url") - - // HTTPMethod is the HTTP method of the request, and is case-insensitive. - HTTPMethod = StringTagName("http.method") - - // HTTPStatusCode is the numeric HTTP status code (200, 404, etc) of the - // HTTP response. - HTTPStatusCode = Uint16TagName("http.status_code") - - ////////////////////////////////////////////////////////////////////// - // DB Tags - ////////////////////////////////////////////////////////////////////// - - // DBInstance is database instance name. 
- DBInstance = StringTagName("db.instance") - - // DBStatement is a database statement for the given database type. - // It can be a query or a prepared statement (i.e., before substitution). - DBStatement = StringTagName("db.statement") - - // DBType is a database type. For any SQL database, "sql". - // For others, the lower-case database category, e.g. "redis" - DBType = StringTagName("db.type") - - // DBUser is a username for accessing database. - DBUser = StringTagName("db.user") - - ////////////////////////////////////////////////////////////////////// - // Message Bus Tag - ////////////////////////////////////////////////////////////////////// - - // MessageBusDestination is an address at which messages can be exchanged - MessageBusDestination = StringTagName("message_bus.destination") - - ////////////////////////////////////////////////////////////////////// - // Error Tag - ////////////////////////////////////////////////////////////////////// - - // Error indicates that operation represented by the span resulted in an error. - Error = BoolTagName("error") -) - -// --- - -// SpanKindEnum represents common span types -type SpanKindEnum string - -type spanKindTagName string - -// Set adds a string tag to the `span` -func (tag spanKindTagName) Set(span opentracing.Span, value SpanKindEnum) { - span.SetTag(string(tag), value) -} - -type rpcServerOption struct { - clientContext opentracing.SpanContext -} - -func (r rpcServerOption) Apply(o *opentracing.StartSpanOptions) { - if r.clientContext != nil { - opentracing.ChildOf(r.clientContext).Apply(o) - } - SpanKindRPCServer.Apply(o) -} - -// RPCServerOption returns a StartSpanOption appropriate for an RPC server span -// with `client` representing the metadata for the remote peer Span if available. -// In case client == nil, due to the client not being instrumented, this RPC -// server span will be a root span. -func RPCServerOption(client opentracing.SpanContext) opentracing.StartSpanOption { - return rpcServerOption{client} -} - -// --- - -// StringTagName is a common tag name to be set to a string value -type StringTagName string - -// Set adds a string tag to the `span` -func (tag StringTagName) Set(span opentracing.Span, value string) { - span.SetTag(string(tag), value) -} - -// --- - -// Uint32TagName is a common tag name to be set to a uint32 value -type Uint32TagName string - -// Set adds a uint32 tag to the `span` -func (tag Uint32TagName) Set(span opentracing.Span, value uint32) { - span.SetTag(string(tag), value) -} - -// --- - -// Uint16TagName is a common tag name to be set to a uint16 value -type Uint16TagName string - -// Set adds a uint16 tag to the `span` -func (tag Uint16TagName) Set(span opentracing.Span, value uint16) { - span.SetTag(string(tag), value) -} - -// --- - -// BoolTagName is a common tag name to be set to a bool value -type BoolTagName string - -// Set adds a bool tag to the `span` -func (tag BoolTagName) Set(span opentracing.Span, value bool) { - span.SetTag(string(tag), value) -} - -// IPv4TagName is a common tag name to be set to an ipv4 value -type IPv4TagName string - -// Set adds IP v4 host address of the peer as an uint32 value to the `span`, keep this for backward and zipkin compatibility -func (tag IPv4TagName) Set(span opentracing.Span, value uint32) { - span.SetTag(string(tag), value) -} - -// SetString records IP v4 host address of the peer as a .-separated tuple to the `span`. 
E.g., "127.0.0.1" -func (tag IPv4TagName) SetString(span opentracing.Span, value string) { - span.SetTag(string(tag), value) -} diff --git a/vendor/github.com/opentracing/opentracing-go/globaltracer.go b/vendor/github.com/opentracing/opentracing-go/globaltracer.go deleted file mode 100644 index 4f7066a92..000000000 --- a/vendor/github.com/opentracing/opentracing-go/globaltracer.go +++ /dev/null @@ -1,42 +0,0 @@ -package opentracing - -type registeredTracer struct { - tracer Tracer - isRegistered bool -} - -var ( - globalTracer = registeredTracer{NoopTracer{}, false} -) - -// SetGlobalTracer sets the [singleton] opentracing.Tracer returned by -// GlobalTracer(). Those who use GlobalTracer (rather than directly manage an -// opentracing.Tracer instance) should call SetGlobalTracer as early as -// possible in main(), prior to calling the `StartSpan` global func below. -// Prior to calling `SetGlobalTracer`, any Spans started via the `StartSpan` -// (etc) globals are noops. -func SetGlobalTracer(tracer Tracer) { - globalTracer = registeredTracer{tracer, true} -} - -// GlobalTracer returns the global singleton `Tracer` implementation. -// Before `SetGlobalTracer()` is called, the `GlobalTracer()` is a noop -// implementation that drops all data handed to it. -func GlobalTracer() Tracer { - return globalTracer.tracer -} - -// StartSpan defers to `Tracer.StartSpan`. See `GlobalTracer()`. -func StartSpan(operationName string, opts ...StartSpanOption) Span { - return globalTracer.tracer.StartSpan(operationName, opts...) -} - -// InitGlobalTracer is deprecated. Please use SetGlobalTracer. -func InitGlobalTracer(tracer Tracer) { - SetGlobalTracer(tracer) -} - -// IsGlobalTracerRegistered returns a `bool` to indicate if a tracer has been globally registered -func IsGlobalTracerRegistered() bool { - return globalTracer.isRegistered -} diff --git a/vendor/github.com/opentracing/opentracing-go/gocontext.go b/vendor/github.com/opentracing/opentracing-go/gocontext.go deleted file mode 100644 index 1831bc9b2..000000000 --- a/vendor/github.com/opentracing/opentracing-go/gocontext.go +++ /dev/null @@ -1,65 +0,0 @@ -package opentracing - -import "context" - -type contextKey struct{} - -var activeSpanKey = contextKey{} - -// ContextWithSpan returns a new `context.Context` that holds a reference to -// the span. If span is nil, a new context without an active span is returned. -func ContextWithSpan(ctx context.Context, span Span) context.Context { - if span != nil { - if tracerWithHook, ok := span.Tracer().(TracerContextWithSpanExtension); ok { - ctx = tracerWithHook.ContextWithSpanHook(ctx, span) - } - } - return context.WithValue(ctx, activeSpanKey, span) -} - -// SpanFromContext returns the `Span` previously associated with `ctx`, or -// `nil` if no such `Span` could be found. -// -// NOTE: context.Context != SpanContext: the former is Go's intra-process -// context propagation mechanism, and the latter houses OpenTracing's per-Span -// identity and baggage information. -func SpanFromContext(ctx context.Context) Span { - val := ctx.Value(activeSpanKey) - if sp, ok := val.(Span); ok { - return sp - } - return nil -} - -// StartSpanFromContext starts and returns a Span with `operationName`, using -// any Span found within `ctx` as a ChildOfRef. If no such parent could be -// found, StartSpanFromContext creates a root (parentless) Span. -// -// The second return value is a context.Context object built around the -// returned Span. -// -// Example usage: -// -// SomeFunction(ctx context.Context, ...) 
{ -// sp, ctx := opentracing.StartSpanFromContext(ctx, "SomeFunction") -// defer sp.Finish() -// ... -// } -func StartSpanFromContext(ctx context.Context, operationName string, opts ...StartSpanOption) (Span, context.Context) { - return StartSpanFromContextWithTracer(ctx, GlobalTracer(), operationName, opts...) -} - -// StartSpanFromContextWithTracer starts and returns a span with `operationName` -// using a span found within the context as a ChildOfRef. If that doesn't exist -// it creates a root span. It also returns a context.Context object built -// around the returned span. -// -// It's behavior is identical to StartSpanFromContext except that it takes an explicit -// tracer as opposed to using the global tracer. -func StartSpanFromContextWithTracer(ctx context.Context, tracer Tracer, operationName string, opts ...StartSpanOption) (Span, context.Context) { - if parentSpan := SpanFromContext(ctx); parentSpan != nil { - opts = append(opts, ChildOf(parentSpan.Context())) - } - span := tracer.StartSpan(operationName, opts...) - return span, ContextWithSpan(ctx, span) -} diff --git a/vendor/github.com/opentracing/opentracing-go/log/field.go b/vendor/github.com/opentracing/opentracing-go/log/field.go deleted file mode 100644 index f222ded79..000000000 --- a/vendor/github.com/opentracing/opentracing-go/log/field.go +++ /dev/null @@ -1,282 +0,0 @@ -package log - -import ( - "fmt" - "math" -) - -type fieldType int - -const ( - stringType fieldType = iota - boolType - intType - int32Type - uint32Type - int64Type - uint64Type - float32Type - float64Type - errorType - objectType - lazyLoggerType - noopType -) - -// Field instances are constructed via LogBool, LogString, and so on. -// Tracing implementations may then handle them via the Field.Marshal -// method. 
-// -// "heavily influenced by" (i.e., partially stolen from) -// https://github.com/uber-go/zap -type Field struct { - key string - fieldType fieldType - numericVal int64 - stringVal string - interfaceVal interface{} -} - -// String adds a string-valued key:value pair to a Span.LogFields() record -func String(key, val string) Field { - return Field{ - key: key, - fieldType: stringType, - stringVal: val, - } -} - -// Bool adds a bool-valued key:value pair to a Span.LogFields() record -func Bool(key string, val bool) Field { - var numericVal int64 - if val { - numericVal = 1 - } - return Field{ - key: key, - fieldType: boolType, - numericVal: numericVal, - } -} - -// Int adds an int-valued key:value pair to a Span.LogFields() record -func Int(key string, val int) Field { - return Field{ - key: key, - fieldType: intType, - numericVal: int64(val), - } -} - -// Int32 adds an int32-valued key:value pair to a Span.LogFields() record -func Int32(key string, val int32) Field { - return Field{ - key: key, - fieldType: int32Type, - numericVal: int64(val), - } -} - -// Int64 adds an int64-valued key:value pair to a Span.LogFields() record -func Int64(key string, val int64) Field { - return Field{ - key: key, - fieldType: int64Type, - numericVal: val, - } -} - -// Uint32 adds a uint32-valued key:value pair to a Span.LogFields() record -func Uint32(key string, val uint32) Field { - return Field{ - key: key, - fieldType: uint32Type, - numericVal: int64(val), - } -} - -// Uint64 adds a uint64-valued key:value pair to a Span.LogFields() record -func Uint64(key string, val uint64) Field { - return Field{ - key: key, - fieldType: uint64Type, - numericVal: int64(val), - } -} - -// Float32 adds a float32-valued key:value pair to a Span.LogFields() record -func Float32(key string, val float32) Field { - return Field{ - key: key, - fieldType: float32Type, - numericVal: int64(math.Float32bits(val)), - } -} - -// Float64 adds a float64-valued key:value pair to a Span.LogFields() record -func Float64(key string, val float64) Field { - return Field{ - key: key, - fieldType: float64Type, - numericVal: int64(math.Float64bits(val)), - } -} - -// Error adds an error with the key "error.object" to a Span.LogFields() record -func Error(err error) Field { - return Field{ - key: "error.object", - fieldType: errorType, - interfaceVal: err, - } -} - -// Object adds an object-valued key:value pair to a Span.LogFields() record -// Please pass in an immutable object, otherwise there may be concurrency issues. -// Such as passing in the map, log.Object may result in "fatal error: concurrent map iteration and map write". -// Because span is sent asynchronously, it is possible that this map will also be modified. -func Object(key string, obj interface{}) Field { - return Field{ - key: key, - fieldType: objectType, - interfaceVal: obj, - } -} - -// Event creates a string-valued Field for span logs with key="event" and value=val. -func Event(val string) Field { - return String("event", val) -} - -// Message creates a string-valued Field for span logs with key="message" and value=val. -func Message(val string) Field { - return String("message", val) -} - -// LazyLogger allows for user-defined, late-bound logging of arbitrary data -type LazyLogger func(fv Encoder) - -// Lazy adds a LazyLogger to a Span.LogFields() record; the tracing -// implementation will call the LazyLogger function at an indefinite time in -// the future (after Lazy() returns). 
-func Lazy(ll LazyLogger) Field { - return Field{ - fieldType: lazyLoggerType, - interfaceVal: ll, - } -} - -// Noop creates a no-op log field that should be ignored by the tracer. -// It can be used to capture optional fields, for example those that should -// only be logged in non-production environment: -// -// func customerField(order *Order) log.Field { -// if os.Getenv("ENVIRONMENT") == "dev" { -// return log.String("customer", order.Customer.ID) -// } -// return log.Noop() -// } -// -// span.LogFields(log.String("event", "purchase"), customerField(order)) -// -func Noop() Field { - return Field{ - fieldType: noopType, - } -} - -// Encoder allows access to the contents of a Field (via a call to -// Field.Marshal). -// -// Tracer implementations typically provide an implementation of Encoder; -// OpenTracing callers typically do not need to concern themselves with it. -type Encoder interface { - EmitString(key, value string) - EmitBool(key string, value bool) - EmitInt(key string, value int) - EmitInt32(key string, value int32) - EmitInt64(key string, value int64) - EmitUint32(key string, value uint32) - EmitUint64(key string, value uint64) - EmitFloat32(key string, value float32) - EmitFloat64(key string, value float64) - EmitObject(key string, value interface{}) - EmitLazyLogger(value LazyLogger) -} - -// Marshal passes a Field instance through to the appropriate -// field-type-specific method of an Encoder. -func (lf Field) Marshal(visitor Encoder) { - switch lf.fieldType { - case stringType: - visitor.EmitString(lf.key, lf.stringVal) - case boolType: - visitor.EmitBool(lf.key, lf.numericVal != 0) - case intType: - visitor.EmitInt(lf.key, int(lf.numericVal)) - case int32Type: - visitor.EmitInt32(lf.key, int32(lf.numericVal)) - case int64Type: - visitor.EmitInt64(lf.key, int64(lf.numericVal)) - case uint32Type: - visitor.EmitUint32(lf.key, uint32(lf.numericVal)) - case uint64Type: - visitor.EmitUint64(lf.key, uint64(lf.numericVal)) - case float32Type: - visitor.EmitFloat32(lf.key, math.Float32frombits(uint32(lf.numericVal))) - case float64Type: - visitor.EmitFloat64(lf.key, math.Float64frombits(uint64(lf.numericVal))) - case errorType: - if err, ok := lf.interfaceVal.(error); ok { - visitor.EmitString(lf.key, err.Error()) - } else { - visitor.EmitString(lf.key, "") - } - case objectType: - visitor.EmitObject(lf.key, lf.interfaceVal) - case lazyLoggerType: - visitor.EmitLazyLogger(lf.interfaceVal.(LazyLogger)) - case noopType: - // intentionally left blank - } -} - -// Key returns the field's key. -func (lf Field) Key() string { - return lf.key -} - -// Value returns the field's value as interface{}. -func (lf Field) Value() interface{} { - switch lf.fieldType { - case stringType: - return lf.stringVal - case boolType: - return lf.numericVal != 0 - case intType: - return int(lf.numericVal) - case int32Type: - return int32(lf.numericVal) - case int64Type: - return int64(lf.numericVal) - case uint32Type: - return uint32(lf.numericVal) - case uint64Type: - return uint64(lf.numericVal) - case float32Type: - return math.Float32frombits(uint32(lf.numericVal)) - case float64Type: - return math.Float64frombits(uint64(lf.numericVal)) - case errorType, objectType, lazyLoggerType: - return lf.interfaceVal - case noopType: - return nil - default: - return nil - } -} - -// String returns a string representation of the key and value. 
-func (lf Field) String() string { - return fmt.Sprint(lf.key, ":", lf.Value()) -} diff --git a/vendor/github.com/opentracing/opentracing-go/log/util.go b/vendor/github.com/opentracing/opentracing-go/log/util.go deleted file mode 100644 index d57e28aa5..000000000 --- a/vendor/github.com/opentracing/opentracing-go/log/util.go +++ /dev/null @@ -1,61 +0,0 @@ -package log - -import ( - "fmt" - "reflect" -) - -// InterleavedKVToFields converts keyValues a la Span.LogKV() to a Field slice -// a la Span.LogFields(). -func InterleavedKVToFields(keyValues ...interface{}) ([]Field, error) { - if len(keyValues)%2 != 0 { - return nil, fmt.Errorf("non-even keyValues len: %d", len(keyValues)) - } - fields := make([]Field, len(keyValues)/2) - for i := 0; i*2 < len(keyValues); i++ { - key, ok := keyValues[i*2].(string) - if !ok { - return nil, fmt.Errorf( - "non-string key (pair #%d): %T", - i, keyValues[i*2]) - } - switch typedVal := keyValues[i*2+1].(type) { - case bool: - fields[i] = Bool(key, typedVal) - case string: - fields[i] = String(key, typedVal) - case int: - fields[i] = Int(key, typedVal) - case int8: - fields[i] = Int32(key, int32(typedVal)) - case int16: - fields[i] = Int32(key, int32(typedVal)) - case int32: - fields[i] = Int32(key, typedVal) - case int64: - fields[i] = Int64(key, typedVal) - case uint: - fields[i] = Uint64(key, uint64(typedVal)) - case uint64: - fields[i] = Uint64(key, typedVal) - case uint8: - fields[i] = Uint32(key, uint32(typedVal)) - case uint16: - fields[i] = Uint32(key, uint32(typedVal)) - case uint32: - fields[i] = Uint32(key, typedVal) - case float32: - fields[i] = Float32(key, typedVal) - case float64: - fields[i] = Float64(key, typedVal) - default: - if typedVal == nil || (reflect.ValueOf(typedVal).Kind() == reflect.Ptr && reflect.ValueOf(typedVal).IsNil()) { - fields[i] = String(key, "nil") - continue - } - // When in doubt, coerce to a string - fields[i] = String(key, fmt.Sprint(typedVal)) - } - } - return fields, nil -} diff --git a/vendor/github.com/opentracing/opentracing-go/noop.go b/vendor/github.com/opentracing/opentracing-go/noop.go deleted file mode 100644 index f9b680a21..000000000 --- a/vendor/github.com/opentracing/opentracing-go/noop.go +++ /dev/null @@ -1,64 +0,0 @@ -package opentracing - -import "github.com/opentracing/opentracing-go/log" - -// A NoopTracer is a trivial, minimum overhead implementation of Tracer -// for which all operations are no-ops. -// -// The primary use of this implementation is in libraries, such as RPC -// frameworks, that make tracing an optional feature controlled by the -// end user. A no-op implementation allows said libraries to use it -// as the default Tracer and to write instrumentation that does -// not need to keep checking if the tracer instance is nil. -// -// For the same reason, the NoopTracer is the default "global" tracer -// (see GlobalTracer and SetGlobalTracer functions). -// -// WARNING: NoopTracer does not support baggage propagation. 
-type NoopTracer struct{} - -type noopSpan struct{} -type noopSpanContext struct{} - -var ( - defaultNoopSpanContext SpanContext = noopSpanContext{} - defaultNoopSpan Span = noopSpan{} - defaultNoopTracer Tracer = NoopTracer{} -) - -const ( - emptyString = "" -) - -// noopSpanContext: -func (n noopSpanContext) ForeachBaggageItem(handler func(k, v string) bool) {} - -// noopSpan: -func (n noopSpan) Context() SpanContext { return defaultNoopSpanContext } -func (n noopSpan) SetBaggageItem(key, val string) Span { return n } -func (n noopSpan) BaggageItem(key string) string { return emptyString } -func (n noopSpan) SetTag(key string, value interface{}) Span { return n } -func (n noopSpan) LogFields(fields ...log.Field) {} -func (n noopSpan) LogKV(keyVals ...interface{}) {} -func (n noopSpan) Finish() {} -func (n noopSpan) FinishWithOptions(opts FinishOptions) {} -func (n noopSpan) SetOperationName(operationName string) Span { return n } -func (n noopSpan) Tracer() Tracer { return defaultNoopTracer } -func (n noopSpan) LogEvent(event string) {} -func (n noopSpan) LogEventWithPayload(event string, payload interface{}) {} -func (n noopSpan) Log(data LogData) {} - -// StartSpan belongs to the Tracer interface. -func (n NoopTracer) StartSpan(operationName string, opts ...StartSpanOption) Span { - return defaultNoopSpan -} - -// Inject belongs to the Tracer interface. -func (n NoopTracer) Inject(sp SpanContext, format interface{}, carrier interface{}) error { - return nil -} - -// Extract belongs to the Tracer interface. -func (n NoopTracer) Extract(format interface{}, carrier interface{}) (SpanContext, error) { - return nil, ErrSpanContextNotFound -} diff --git a/vendor/github.com/opentracing/opentracing-go/propagation.go b/vendor/github.com/opentracing/opentracing-go/propagation.go deleted file mode 100644 index b0c275eb0..000000000 --- a/vendor/github.com/opentracing/opentracing-go/propagation.go +++ /dev/null @@ -1,176 +0,0 @@ -package opentracing - -import ( - "errors" - "net/http" -) - -/////////////////////////////////////////////////////////////////////////////// -// CORE PROPAGATION INTERFACES: -/////////////////////////////////////////////////////////////////////////////// - -var ( - // ErrUnsupportedFormat occurs when the `format` passed to Tracer.Inject() or - // Tracer.Extract() is not recognized by the Tracer implementation. - ErrUnsupportedFormat = errors.New("opentracing: Unknown or unsupported Inject/Extract format") - - // ErrSpanContextNotFound occurs when the `carrier` passed to - // Tracer.Extract() is valid and uncorrupted but has insufficient - // information to extract a SpanContext. - ErrSpanContextNotFound = errors.New("opentracing: SpanContext not found in Extract carrier") - - // ErrInvalidSpanContext errors occur when Tracer.Inject() is asked to - // operate on a SpanContext which it is not prepared to handle (for - // example, since it was created by a different tracer implementation). - ErrInvalidSpanContext = errors.New("opentracing: SpanContext type incompatible with tracer") - - // ErrInvalidCarrier errors occur when Tracer.Inject() or Tracer.Extract() - // implementations expect a different type of `carrier` than they are - // given. - ErrInvalidCarrier = errors.New("opentracing: Invalid Inject/Extract carrier") - - // ErrSpanContextCorrupted occurs when the `carrier` passed to - // Tracer.Extract() is of the expected type but is corrupted. 
- ErrSpanContextCorrupted = errors.New("opentracing: SpanContext data corrupted in Extract carrier") -) - -/////////////////////////////////////////////////////////////////////////////// -// BUILTIN PROPAGATION FORMATS: -/////////////////////////////////////////////////////////////////////////////// - -// BuiltinFormat is used to demarcate the values within package `opentracing` -// that are intended for use with the Tracer.Inject() and Tracer.Extract() -// methods. -type BuiltinFormat byte - -const ( - // Binary represents SpanContexts as opaque binary data. - // - // For Tracer.Inject(): the carrier must be an `io.Writer`. - // - // For Tracer.Extract(): the carrier must be an `io.Reader`. - Binary BuiltinFormat = iota - - // TextMap represents SpanContexts as key:value string pairs. - // - // Unlike HTTPHeaders, the TextMap format does not restrict the key or - // value character sets in any way. - // - // For Tracer.Inject(): the carrier must be a `TextMapWriter`. - // - // For Tracer.Extract(): the carrier must be a `TextMapReader`. - TextMap - - // HTTPHeaders represents SpanContexts as HTTP header string pairs. - // - // Unlike TextMap, the HTTPHeaders format requires that the keys and values - // be valid as HTTP headers as-is (i.e., character casing may be unstable - // and special characters are disallowed in keys, values should be - // URL-escaped, etc). - // - // For Tracer.Inject(): the carrier must be a `TextMapWriter`. - // - // For Tracer.Extract(): the carrier must be a `TextMapReader`. - // - // See HTTPHeadersCarrier for an implementation of both TextMapWriter - // and TextMapReader that defers to an http.Header instance for storage. - // For example, Inject(): - // - // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) - // err := span.Tracer().Inject( - // span.Context(), opentracing.HTTPHeaders, carrier) - // - // Or Extract(): - // - // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) - // clientContext, err := tracer.Extract( - // opentracing.HTTPHeaders, carrier) - // - HTTPHeaders -) - -// TextMapWriter is the Inject() carrier for the TextMap builtin format. With -// it, the caller can encode a SpanContext for propagation as entries in a map -// of unicode strings. -type TextMapWriter interface { - // Set a key:value pair to the carrier. Multiple calls to Set() for the - // same key leads to undefined behavior. - // - // NOTE: The backing store for the TextMapWriter may contain data unrelated - // to SpanContext. As such, Inject() and Extract() implementations that - // call the TextMapWriter and TextMapReader interfaces must agree on a - // prefix or other convention to distinguish their own key:value pairs. - Set(key, val string) -} - -// TextMapReader is the Extract() carrier for the TextMap builtin format. With it, -// the caller can decode a propagated SpanContext as entries in a map of -// unicode strings. -type TextMapReader interface { - // ForeachKey returns TextMap contents via repeated calls to the `handler` - // function. If any call to `handler` returns a non-nil error, ForeachKey - // terminates and returns that error. - // - // NOTE: The backing store for the TextMapReader may contain data unrelated - // to SpanContext. As such, Inject() and Extract() implementations that - // call the TextMapWriter and TextMapReader interfaces must agree on a - // prefix or other convention to distinguish their own key:value pairs. 
- // - // The "foreach" callback pattern reduces unnecessary copying in some cases - // and also allows implementations to hold locks while the map is read. - ForeachKey(handler func(key, val string) error) error -} - -// TextMapCarrier allows the use of regular map[string]string -// as both TextMapWriter and TextMapReader. -type TextMapCarrier map[string]string - -// ForeachKey conforms to the TextMapReader interface. -func (c TextMapCarrier) ForeachKey(handler func(key, val string) error) error { - for k, v := range c { - if err := handler(k, v); err != nil { - return err - } - } - return nil -} - -// Set implements Set() of opentracing.TextMapWriter -func (c TextMapCarrier) Set(key, val string) { - c[key] = val -} - -// HTTPHeadersCarrier satisfies both TextMapWriter and TextMapReader. -// -// Example usage for server side: -// -// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) -// clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier) -// -// Example usage for client side: -// -// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) -// err := tracer.Inject( -// span.Context(), -// opentracing.HTTPHeaders, -// carrier) -// -type HTTPHeadersCarrier http.Header - -// Set conforms to the TextMapWriter interface. -func (c HTTPHeadersCarrier) Set(key, val string) { - h := http.Header(c) - h.Set(key, val) -} - -// ForeachKey conforms to the TextMapReader interface. -func (c HTTPHeadersCarrier) ForeachKey(handler func(key, val string) error) error { - for k, vals := range c { - for _, v := range vals { - if err := handler(k, v); err != nil { - return err - } - } - } - return nil -} diff --git a/vendor/github.com/opentracing/opentracing-go/span.go b/vendor/github.com/opentracing/opentracing-go/span.go deleted file mode 100644 index 0d3fb5341..000000000 --- a/vendor/github.com/opentracing/opentracing-go/span.go +++ /dev/null @@ -1,189 +0,0 @@ -package opentracing - -import ( - "time" - - "github.com/opentracing/opentracing-go/log" -) - -// SpanContext represents Span state that must propagate to descendant Spans and across process -// boundaries (e.g., a tuple). -type SpanContext interface { - // ForeachBaggageItem grants access to all baggage items stored in the - // SpanContext. - // The handler function will be called for each baggage key/value pair. - // The ordering of items is not guaranteed. - // - // The bool return value indicates if the handler wants to continue iterating - // through the rest of the baggage items; for example if the handler is trying to - // find some baggage item by pattern matching the name, it can return false - // as soon as the item is found to stop further iterations. - ForeachBaggageItem(handler func(k, v string) bool) -} - -// Span represents an active, un-finished span in the OpenTracing system. -// -// Spans are created by the Tracer interface. -type Span interface { - // Sets the end timestamp and finalizes Span state. - // - // With the exception of calls to Context() (which are always allowed), - // Finish() must be the last call made to any span instance, and to do - // otherwise leads to undefined behavior. - Finish() - // FinishWithOptions is like Finish() but with explicit control over - // timestamps and log data. - FinishWithOptions(opts FinishOptions) - - // Context() yields the SpanContext for this Span. Note that the return - // value of Context() is still valid after a call to Span.Finish(), as is - // a call to Span.Context() after a call to Span.Finish(). 
- Context() SpanContext - - // Sets or changes the operation name. - // - // Returns a reference to this Span for chaining. - SetOperationName(operationName string) Span - - // Adds a tag to the span. - // - // If there is a pre-existing tag set for `key`, it is overwritten. - // - // Tag values can be numeric types, strings, or bools. The behavior of - // other tag value types is undefined at the OpenTracing level. If a - // tracing system does not know how to handle a particular value type, it - // may ignore the tag, but shall not panic. - // - // Returns a reference to this Span for chaining. - SetTag(key string, value interface{}) Span - - // LogFields is an efficient and type-checked way to record key:value - // logging data about a Span, though the programming interface is a little - // more verbose than LogKV(). Here's an example: - // - // span.LogFields( - // log.String("event", "soft error"), - // log.String("type", "cache timeout"), - // log.Int("waited.millis", 1500)) - // - // Also see Span.FinishWithOptions() and FinishOptions.BulkLogData. - LogFields(fields ...log.Field) - - // LogKV is a concise, readable way to record key:value logging data about - // a Span, though unfortunately this also makes it less efficient and less - // type-safe than LogFields(). Here's an example: - // - // span.LogKV( - // "event", "soft error", - // "type", "cache timeout", - // "waited.millis", 1500) - // - // For LogKV (as opposed to LogFields()), the parameters must appear as - // key-value pairs, like - // - // span.LogKV(key1, val1, key2, val2, key3, val3, ...) - // - // The keys must all be strings. The values may be strings, numeric types, - // bools, Go error instances, or arbitrary structs. - // - // (Note to implementors: consider the log.InterleavedKVToFields() helper) - LogKV(alternatingKeyValues ...interface{}) - - // SetBaggageItem sets a key:value pair on this Span and its SpanContext - // that also propagates to descendants of this Span. - // - // SetBaggageItem() enables powerful functionality given a full-stack - // opentracing integration (e.g., arbitrary application data from a mobile - // app can make it, transparently, all the way into the depths of a storage - // system), and with it some powerful costs: use this feature with care. - // - // IMPORTANT NOTE #1: SetBaggageItem() will only propagate baggage items to - // *future* causal descendants of the associated Span. - // - // IMPORTANT NOTE #2: Use this thoughtfully and with care. Every key and - // value is copied into every local *and remote* child of the associated - // Span, and that can add up to a lot of network and cpu overhead. - // - // Returns a reference to this Span for chaining. - SetBaggageItem(restrictedKey, value string) Span - - // Gets the value for a baggage item given its key. Returns the empty string - // if the value isn't found in this Span. - BaggageItem(restrictedKey string) string - - // Provides access to the Tracer that created this Span. - Tracer() Tracer - - // Deprecated: use LogFields or LogKV - LogEvent(event string) - // Deprecated: use LogFields or LogKV - LogEventWithPayload(event string, payload interface{}) - // Deprecated: use LogFields or LogKV - Log(data LogData) -} - -// LogRecord is data associated with a single Span log. Every LogRecord -// instance must specify at least one Field. 
-type LogRecord struct { - Timestamp time.Time - Fields []log.Field -} - -// FinishOptions allows Span.FinishWithOptions callers to override the finish -// timestamp and provide log data via a bulk interface. -type FinishOptions struct { - // FinishTime overrides the Span's finish time, or implicitly becomes - // time.Now() if FinishTime.IsZero(). - // - // FinishTime must resolve to a timestamp that's >= the Span's StartTime - // (per StartSpanOptions). - FinishTime time.Time - - // LogRecords allows the caller to specify the contents of many LogFields() - // calls with a single slice. May be nil. - // - // None of the LogRecord.Timestamp values may be .IsZero() (i.e., they must - // be set explicitly). Also, they must be >= the Span's start timestamp and - // <= the FinishTime (or time.Now() if FinishTime.IsZero()). Otherwise the - // behavior of FinishWithOptions() is undefined. - // - // If specified, the caller hands off ownership of LogRecords at - // FinishWithOptions() invocation time. - // - // If specified, the (deprecated) BulkLogData must be nil or empty. - LogRecords []LogRecord - - // BulkLogData is DEPRECATED. - BulkLogData []LogData -} - -// LogData is DEPRECATED -type LogData struct { - Timestamp time.Time - Event string - Payload interface{} -} - -// ToLogRecord converts a deprecated LogData to a non-deprecated LogRecord -func (ld *LogData) ToLogRecord() LogRecord { - var literalTimestamp time.Time - if ld.Timestamp.IsZero() { - literalTimestamp = time.Now() - } else { - literalTimestamp = ld.Timestamp - } - rval := LogRecord{ - Timestamp: literalTimestamp, - } - if ld.Payload == nil { - rval.Fields = []log.Field{ - log.String("event", ld.Event), - } - } else { - rval.Fields = []log.Field{ - log.String("event", ld.Event), - log.Object("payload", ld.Payload), - } - } - return rval -} diff --git a/vendor/github.com/opentracing/opentracing-go/tracer.go b/vendor/github.com/opentracing/opentracing-go/tracer.go deleted file mode 100644 index 715f0cedf..000000000 --- a/vendor/github.com/opentracing/opentracing-go/tracer.go +++ /dev/null @@ -1,304 +0,0 @@ -package opentracing - -import "time" - -// Tracer is a simple, thin interface for Span creation and SpanContext -// propagation. -type Tracer interface { - - // Create, start, and return a new Span with the given `operationName` and - // incorporate the given StartSpanOption `opts`. (Note that `opts` borrows - // from the "functional options" pattern, per - // http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis) - // - // A Span with no SpanReference options (e.g., opentracing.ChildOf() or - // opentracing.FollowsFrom()) becomes the root of its own trace. - // - // Examples: - // - // var tracer opentracing.Tracer = ... - // - // // The root-span case: - // sp := tracer.StartSpan("GetFeed") - // - // // The vanilla child span case: - // sp := tracer.StartSpan( - // "GetFeed", - // opentracing.ChildOf(parentSpan.Context())) - // - // // All the bells and whistles: - // sp := tracer.StartSpan( - // "GetFeed", - // opentracing.ChildOf(parentSpan.Context()), - // opentracing.Tag{"user_agent", loggedReq.UserAgent}, - // opentracing.StartTime(loggedReq.Timestamp), - // ) - // - StartSpan(operationName string, opts ...StartSpanOption) Span - - // Inject() takes the `sm` SpanContext instance and injects it for - // propagation within `carrier`. The actual type of `carrier` depends on - // the value of `format`. 
- // - // OpenTracing defines a common set of `format` values (see BuiltinFormat), - // and each has an expected carrier type. - // - // Other packages may declare their own `format` values, much like the keys - // used by `context.Context` (see https://godoc.org/context#WithValue). - // - // Example usage (sans error handling): - // - // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) - // err := tracer.Inject( - // span.Context(), - // opentracing.HTTPHeaders, - // carrier) - // - // NOTE: All opentracing.Tracer implementations MUST support all - // BuiltinFormats. - // - // Implementations may return opentracing.ErrUnsupportedFormat if `format` - // is not supported by (or not known by) the implementation. - // - // Implementations may return opentracing.ErrInvalidCarrier or any other - // implementation-specific error if the format is supported but injection - // fails anyway. - // - // See Tracer.Extract(). - Inject(sm SpanContext, format interface{}, carrier interface{}) error - - // Extract() returns a SpanContext instance given `format` and `carrier`. - // - // OpenTracing defines a common set of `format` values (see BuiltinFormat), - // and each has an expected carrier type. - // - // Other packages may declare their own `format` values, much like the keys - // used by `context.Context` (see - // https://godoc.org/golang.org/x/net/context#WithValue). - // - // Example usage (with StartSpan): - // - // - // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) - // clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier) - // - // // ... assuming the ultimate goal here is to resume the trace with a - // // server-side Span: - // var serverSpan opentracing.Span - // if err == nil { - // span = tracer.StartSpan( - // rpcMethodName, ext.RPCServerOption(clientContext)) - // } else { - // span = tracer.StartSpan(rpcMethodName) - // } - // - // - // NOTE: All opentracing.Tracer implementations MUST support all - // BuiltinFormats. - // - // Return values: - // - A successful Extract returns a SpanContext instance and a nil error - // - If there was simply no SpanContext to extract in `carrier`, Extract() - // returns (nil, opentracing.ErrSpanContextNotFound) - // - If `format` is unsupported or unrecognized, Extract() returns (nil, - // opentracing.ErrUnsupportedFormat) - // - If there are more fundamental problems with the `carrier` object, - // Extract() may return opentracing.ErrInvalidCarrier, - // opentracing.ErrSpanContextCorrupted, or implementation-specific - // errors. - // - // See Tracer.Inject(). - Extract(format interface{}, carrier interface{}) (SpanContext, error) -} - -// StartSpanOptions allows Tracer.StartSpan() callers and implementors a -// mechanism to override the start timestamp, specify Span References, and make -// a single Tag or multiple Tags available at Span start time. -// -// StartSpan() callers should look at the StartSpanOption interface and -// implementations available in this package. -// -// Tracer implementations can convert a slice of `StartSpanOption` instances -// into a `StartSpanOptions` struct like so: -// -// func StartSpan(opName string, opts ...opentracing.StartSpanOption) { -// sso := opentracing.StartSpanOptions{} -// for _, o := range opts { -// o.Apply(&sso) -// } -// ... -// } -// -type StartSpanOptions struct { - // Zero or more causal references to other Spans (via their SpanContext). - // If empty, start a "root" Span (i.e., start a new trace). 
- References []SpanReference - - // StartTime overrides the Span's start time, or implicitly becomes - // time.Now() if StartTime.IsZero(). - StartTime time.Time - - // Tags may have zero or more entries; the restrictions on map values are - // identical to those for Span.SetTag(). May be nil. - // - // If specified, the caller hands off ownership of Tags at - // StartSpan() invocation time. - Tags map[string]interface{} -} - -// StartSpanOption instances (zero or more) may be passed to Tracer.StartSpan. -// -// StartSpanOption borrows from the "functional options" pattern, per -// http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis -type StartSpanOption interface { - Apply(*StartSpanOptions) -} - -// SpanReferenceType is an enum type describing different categories of -// relationships between two Spans. If Span-2 refers to Span-1, the -// SpanReferenceType describes Span-1 from Span-2's perspective. For example, -// ChildOfRef means that Span-1 created Span-2. -// -// NOTE: Span-1 and Span-2 do *not* necessarily depend on each other for -// completion; e.g., Span-2 may be part of a background job enqueued by Span-1, -// or Span-2 may be sitting in a distributed queue behind Span-1. -type SpanReferenceType int - -const ( - // ChildOfRef refers to a parent Span that caused *and* somehow depends - // upon the new child Span. Often (but not always), the parent Span cannot - // finish until the child Span does. - // - // An timing diagram for a ChildOfRef that's blocked on the new Span: - // - // [-Parent Span---------] - // [-Child Span----] - // - // See http://opentracing.io/spec/ - // - // See opentracing.ChildOf() - ChildOfRef SpanReferenceType = iota - - // FollowsFromRef refers to a parent Span that does not depend in any way - // on the result of the new child Span. For instance, one might use - // FollowsFromRefs to describe pipeline stages separated by queues, - // or a fire-and-forget cache insert at the tail end of a web request. - // - // A FollowsFromRef Span is part of the same logical trace as the new Span: - // i.e., the new Span is somehow caused by the work of its FollowsFromRef. - // - // All of the following could be valid timing diagrams for children that - // "FollowFrom" a parent. - // - // [-Parent Span-] [-Child Span-] - // - // - // [-Parent Span--] - // [-Child Span-] - // - // - // [-Parent Span-] - // [-Child Span-] - // - // See http://opentracing.io/spec/ - // - // See opentracing.FollowsFrom() - FollowsFromRef -) - -// SpanReference is a StartSpanOption that pairs a SpanReferenceType and a -// referenced SpanContext. See the SpanReferenceType documentation for -// supported relationships. If SpanReference is created with -// ReferencedContext==nil, it has no effect. Thus it allows for a more concise -// syntax for starting spans: -// -// sc, _ := tracer.Extract(someFormat, someCarrier) -// span := tracer.StartSpan("operation", opentracing.ChildOf(sc)) -// -// The `ChildOf(sc)` option above will not panic if sc == nil, it will just -// not add the parent span reference to the options. -type SpanReference struct { - Type SpanReferenceType - ReferencedContext SpanContext -} - -// Apply satisfies the StartSpanOption interface. -func (r SpanReference) Apply(o *StartSpanOptions) { - if r.ReferencedContext != nil { - o.References = append(o.References, r) - } -} - -// ChildOf returns a StartSpanOption pointing to a dependent parent span. -// If sc == nil, the option has no effect. 
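A short sketch of how the reference types above are used in practice, assuming the ChildOf, FollowsFrom, Tag, and StartTime options documented in this file; operation names and tag values are invented.

package example

import (
	"time"

	opentracing "github.com/opentracing/opentracing-go"
)

// startChildren starts one span that the parent waits on and one that is
// fire-and-forget, mirroring ChildOfRef and FollowsFromRef respectively.
func startChildren(tracer opentracing.Tracer, parent opentracing.Span) {
	child := tracer.StartSpan(
		"load-from-cache",
		opentracing.ChildOf(parent.Context()),
		opentracing.Tag{Key: "cache.kind", Value: "redis"},
	)
	defer child.Finish()

	async := tracer.StartSpan(
		"enqueue-audit-event",
		opentracing.FollowsFrom(parent.Context()),
		opentracing.StartTime(time.Now()),
	)
	defer async.Finish()
}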
-// -// See ChildOfRef, SpanReference -func ChildOf(sc SpanContext) SpanReference { - return SpanReference{ - Type: ChildOfRef, - ReferencedContext: sc, - } -} - -// FollowsFrom returns a StartSpanOption pointing to a parent Span that caused -// the child Span but does not directly depend on its result in any way. -// If sc == nil, the option has no effect. -// -// See FollowsFromRef, SpanReference -func FollowsFrom(sc SpanContext) SpanReference { - return SpanReference{ - Type: FollowsFromRef, - ReferencedContext: sc, - } -} - -// StartTime is a StartSpanOption that sets an explicit start timestamp for the -// new Span. -type StartTime time.Time - -// Apply satisfies the StartSpanOption interface. -func (t StartTime) Apply(o *StartSpanOptions) { - o.StartTime = time.Time(t) -} - -// Tags are a generic map from an arbitrary string key to an opaque value type. -// The underlying tracing system is responsible for interpreting and -// serializing the values. -type Tags map[string]interface{} - -// Apply satisfies the StartSpanOption interface. -func (t Tags) Apply(o *StartSpanOptions) { - if o.Tags == nil { - o.Tags = make(map[string]interface{}) - } - for k, v := range t { - o.Tags[k] = v - } -} - -// Tag may be passed as a StartSpanOption to add a tag to new spans, -// or its Set method may be used to apply the tag to an existing Span, -// for example: -// -// tracer.StartSpan("opName", Tag{"Key", value}) -// -// or -// -// Tag{"key", value}.Set(span) -type Tag struct { - Key string - Value interface{} -} - -// Apply satisfies the StartSpanOption interface. -func (t Tag) Apply(o *StartSpanOptions) { - if o.Tags == nil { - o.Tags = make(map[string]interface{}) - } - o.Tags[t.Key] = t.Value -} - -// Set applies the tag to an existing Span. -func (t Tag) Set(s Span) { - s.SetTag(t.Key, t.Value) -} diff --git a/vendor/github.com/prometheus/client_golang/LICENSE b/vendor/github.com/prometheus/client_golang/LICENSE deleted file mode 100644 index 261eeb9e9..000000000 --- a/vendor/github.com/prometheus/client_golang/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE deleted file mode 100644 index dd878a30e..000000000 --- a/vendor/github.com/prometheus/client_golang/NOTICE +++ /dev/null @@ -1,23 +0,0 @@ -Prometheus instrumentation library for Go applications -Copyright 2012-2015 The Prometheus Authors - -This product includes software developed at -SoundCloud Ltd. (http://soundcloud.com/). - - -The following components are included in this product: - -perks - a fork of https://github.com/bmizerany/perks -https://github.com/beorn7/perks -Copyright 2013-2015 Blake Mizerany, Björn Rabenstein -See https://github.com/beorn7/perks/blob/master/README.md for license details. - -Go support for Protocol Buffers - Google's data interchange format -http://github.com/golang/protobuf/ -Copyright 2010 The Go Authors -See source code for license details. - -Support for streaming Protocol Buffer messages for the Go language (golang). 
-https://github.com/matttproud/golang_protobuf_extensions -Copyright 2013 Matt T. Proud -Licensed under the Apache License, Version 2.0 diff --git a/vendor/github.com/prometheus/client_golang/prometheus/.gitignore b/vendor/github.com/prometheus/client_golang/prometheus/.gitignore deleted file mode 100644 index 3460f0346..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/.gitignore +++ /dev/null @@ -1 +0,0 @@ -command-line-arguments.test diff --git a/vendor/github.com/prometheus/client_golang/prometheus/README.md b/vendor/github.com/prometheus/client_golang/prometheus/README.md deleted file mode 100644 index c67ff1b7f..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/README.md +++ /dev/null @@ -1 +0,0 @@ -See [![Go Reference](https://pkg.go.dev/badge/github.com/prometheus/client_golang/prometheus.svg)](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus). diff --git a/vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go deleted file mode 100644 index 450189f35..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import "runtime/debug" - -// NewBuildInfoCollector is the obsolete version of collectors.NewBuildInfoCollector. -// See there for documentation. -// -// Deprecated: Use collectors.NewBuildInfoCollector instead. -func NewBuildInfoCollector() Collector { - path, version, sum := "unknown", "unknown", "unknown" - if bi, ok := debug.ReadBuildInfo(); ok { - path = bi.Main.Path - version = bi.Main.Version - sum = bi.Main.Sum - } - c := &selfCollector{MustNewConstMetric( - NewDesc( - "go_build_info", - "Build information about the main Go module.", - nil, Labels{"path": path, "version": version, "checksum": sum}, - ), - GaugeValue, 1)} - c.init(c.self) - return c -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go deleted file mode 100644 index cf05079fb..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/collector.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package prometheus - -// Collector is the interface implemented by anything that can be used by -// Prometheus to collect metrics. A Collector has to be registered for -// collection. See Registerer.Register. -// -// The stock metrics provided by this package (Gauge, Counter, Summary, -// Histogram, Untyped) are also Collectors (which only ever collect one metric, -// namely itself). An implementer of Collector may, however, collect multiple -// metrics in a coordinated fashion and/or create metrics on the fly. Examples -// for collectors already implemented in this library are the metric vectors -// (i.e. collection of multiple instances of the same Metric but with different -// label values) like GaugeVec or SummaryVec, and the ExpvarCollector. -type Collector interface { - // Describe sends the super-set of all possible descriptors of metrics - // collected by this Collector to the provided channel and returns once - // the last descriptor has been sent. The sent descriptors fulfill the - // consistency and uniqueness requirements described in the Desc - // documentation. - // - // It is valid if one and the same Collector sends duplicate - // descriptors. Those duplicates are simply ignored. However, two - // different Collectors must not send duplicate descriptors. - // - // Sending no descriptor at all marks the Collector as “unchecked”, - // i.e. no checks will be performed at registration time, and the - // Collector may yield any Metric it sees fit in its Collect method. - // - // This method idempotently sends the same descriptors throughout the - // lifetime of the Collector. It may be called concurrently and - // therefore must be implemented in a concurrency safe way. - // - // If a Collector encounters an error while executing this method, it - // must send an invalid descriptor (created with NewInvalidDesc) to - // signal the error to the registry. - Describe(chan<- *Desc) - // Collect is called by the Prometheus registry when collecting - // metrics. The implementation sends each collected metric via the - // provided channel and returns once the last metric has been sent. The - // descriptor of each sent metric is one of those returned by Describe - // (unless the Collector is unchecked, see above). Returned metrics that - // share the same descriptor must differ in their variable label - // values. - // - // This method may be called concurrently and must therefore be - // implemented in a concurrency safe way. Blocking occurs at the expense - // of total performance of rendering all registered metrics. Ideally, - // Collector implementations support concurrent readers. - Collect(chan<- Metric) -} - -// DescribeByCollect is a helper to implement the Describe method of a custom -// Collector. It collects the metrics from the provided Collector and sends -// their descriptors to the provided channel. -// -// If a Collector collects the same metrics throughout its lifetime, its -// Describe method can simply be implemented as: -// -// func (c customCollector) Describe(ch chan<- *Desc) { -// DescribeByCollect(c, ch) -// } -// -// However, this will not work if the metrics collected change dynamically over -// the lifetime of the Collector in a way that their combined set of descriptors -// changes as well. The shortcut implementation will then violate the contract -// of the Describe method. 
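As a hedged illustration of the Collector contract described above, a collector that exports a single gauge per scrape might look like the following; the metric name and the depth callback are invented.

package example

import "github.com/prometheus/client_golang/prometheus"

// queueDepthCollector reports the current depth of an in-memory queue.
type queueDepthCollector struct {
	desc  *prometheus.Desc
	depth func() float64 // invented callback returning the current depth
}

func newQueueDepthCollector(depth func() float64) *queueDepthCollector {
	return &queueDepthCollector{
		desc: prometheus.NewDesc(
			"example_queue_depth",
			"Current number of items waiting in the queue.",
			nil, nil,
		),
		depth: depth,
	}
}

// Describe sends the only descriptor this collector ever uses.
func (c *queueDepthCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.desc
}

// Collect creates a fresh const metric on every scrape.
func (c *queueDepthCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, c.depth())
}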
If a Collector sometimes collects no metrics at all -// (for example vectors like CounterVec, GaugeVec, etc., which only collect -// metrics after a metric with a fully specified label set has been accessed), -// it might even get registered as an unchecked Collector (cf. the Register -// method of the Registerer interface). Hence, only use this shortcut -// implementation of Describe if you are certain to fulfill the contract. -// -// The Collector example demonstrates a use of DescribeByCollect. -func DescribeByCollect(c Collector, descs chan<- *Desc) { - metrics := make(chan Metric) - go func() { - c.Collect(metrics) - close(metrics) - }() - for m := range metrics { - descs <- m.Desc() - } -} - -// selfCollector implements Collector for a single Metric so that the Metric -// collects itself. Add it as an anonymous field to a struct that implements -// Metric, and call init with the Metric itself as an argument. -type selfCollector struct { - self Metric -} - -// init provides the selfCollector with a reference to the metric it is supposed -// to collect. It is usually called within the factory function to create a -// metric. See example. -func (c *selfCollector) init(self Metric) { - c.self = self -} - -// Describe implements Collector. -func (c *selfCollector) Describe(ch chan<- *Desc) { - ch <- c.self.Desc() -} - -// Collect implements Collector. -func (c *selfCollector) Collect(ch chan<- Metric) { - ch <- c.self -} - -// collectorMetric is a metric that is also a collector. -// Because of selfCollector, most (if not all) Metrics in -// this package are also collectors. -type collectorMetric interface { - Metric - Collector -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/collectors.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/collectors.go deleted file mode 100644 index f4c92913a..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/collectors/collectors.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package collectors provides implementations of prometheus.Collector to -// conveniently collect process and Go-related metrics. -package collectors - -import "github.com/prometheus/client_golang/prometheus" - -// NewBuildInfoCollector returns a collector collecting a single metric -// "go_build_info" with the constant value 1 and three labels "path", "version", -// and "checksum". Their label values contain the main module path, version, and -// checksum, respectively. The labels will only have meaningful values if the -// binary is built with Go module support and from source code retrieved from -// the source repository (rather than the local file system). This is usually -// accomplished by building from outside of GOPATH, specifying the full address -// of the main package, e.g. "GO111MODULE=on go run -// github.com/prometheus/client_golang/examples/random". 
If built without Go -// module support, all label values will be "unknown". If built with Go module -// support but using the source code from the local file system, the "path" will -// be set appropriately, but "checksum" will be empty and "version" will be -// "(devel)". -// -// This collector uses only the build information for the main module. See -// https://github.com/povilasv/prommod for an example of a collector for the -// module dependencies. -func NewBuildInfoCollector() prometheus.Collector { - //nolint:staticcheck // Ignore SA1019 until v2. - return prometheus.NewBuildInfoCollector() -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector.go deleted file mode 100644 index d5a7279fb..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/collectors/dbstats_collector.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package collectors - -import ( - "database/sql" - - "github.com/prometheus/client_golang/prometheus" -) - -type dbStatsCollector struct { - db *sql.DB - - maxOpenConnections *prometheus.Desc - - openConnections *prometheus.Desc - inUseConnections *prometheus.Desc - idleConnections *prometheus.Desc - - waitCount *prometheus.Desc - waitDuration *prometheus.Desc - maxIdleClosed *prometheus.Desc - maxIdleTimeClosed *prometheus.Desc - maxLifetimeClosed *prometheus.Desc -} - -// NewDBStatsCollector returns a collector that exports metrics about the given *sql.DB. -// See https://golang.org/pkg/database/sql/#DBStats for more information on stats. 
-func NewDBStatsCollector(db *sql.DB, dbName string) prometheus.Collector { - fqName := func(name string) string { - return "go_sql_" + name - } - return &dbStatsCollector{ - db: db, - maxOpenConnections: prometheus.NewDesc( - fqName("max_open_connections"), - "Maximum number of open connections to the database.", - nil, prometheus.Labels{"db_name": dbName}, - ), - openConnections: prometheus.NewDesc( - fqName("open_connections"), - "The number of established connections both in use and idle.", - nil, prometheus.Labels{"db_name": dbName}, - ), - inUseConnections: prometheus.NewDesc( - fqName("in_use_connections"), - "The number of connections currently in use.", - nil, prometheus.Labels{"db_name": dbName}, - ), - idleConnections: prometheus.NewDesc( - fqName("idle_connections"), - "The number of idle connections.", - nil, prometheus.Labels{"db_name": dbName}, - ), - waitCount: prometheus.NewDesc( - fqName("wait_count_total"), - "The total number of connections waited for.", - nil, prometheus.Labels{"db_name": dbName}, - ), - waitDuration: prometheus.NewDesc( - fqName("wait_duration_seconds_total"), - "The total time blocked waiting for a new connection.", - nil, prometheus.Labels{"db_name": dbName}, - ), - maxIdleClosed: prometheus.NewDesc( - fqName("max_idle_closed_total"), - "The total number of connections closed due to SetMaxIdleConns.", - nil, prometheus.Labels{"db_name": dbName}, - ), - maxIdleTimeClosed: prometheus.NewDesc( - fqName("max_idle_time_closed_total"), - "The total number of connections closed due to SetConnMaxIdleTime.", - nil, prometheus.Labels{"db_name": dbName}, - ), - maxLifetimeClosed: prometheus.NewDesc( - fqName("max_lifetime_closed_total"), - "The total number of connections closed due to SetConnMaxLifetime.", - nil, prometheus.Labels{"db_name": dbName}, - ), - } -} - -// Describe implements Collector. -func (c *dbStatsCollector) Describe(ch chan<- *prometheus.Desc) { - ch <- c.maxOpenConnections - ch <- c.openConnections - ch <- c.inUseConnections - ch <- c.idleConnections - ch <- c.waitCount - ch <- c.waitDuration - ch <- c.maxIdleClosed - ch <- c.maxLifetimeClosed - ch <- c.maxIdleTimeClosed -} - -// Collect implements Collector. 
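Wiring the collector above into a registry is a one-liner; a sketch assuming an already-open *sql.DB and an invented db_name label value.

package example

import (
	"database/sql"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
)

// registerDBStats exposes go_sql_* metrics for the given database handle.
func registerDBStats(reg *prometheus.Registry, db *sql.DB) {
	reg.MustRegister(collectors.NewDBStatsCollector(db, "orders")) // "orders" is an invented db_name
}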
-func (c *dbStatsCollector) Collect(ch chan<- prometheus.Metric) { - stats := c.db.Stats() - ch <- prometheus.MustNewConstMetric(c.maxOpenConnections, prometheus.GaugeValue, float64(stats.MaxOpenConnections)) - ch <- prometheus.MustNewConstMetric(c.openConnections, prometheus.GaugeValue, float64(stats.OpenConnections)) - ch <- prometheus.MustNewConstMetric(c.inUseConnections, prometheus.GaugeValue, float64(stats.InUse)) - ch <- prometheus.MustNewConstMetric(c.idleConnections, prometheus.GaugeValue, float64(stats.Idle)) - ch <- prometheus.MustNewConstMetric(c.waitCount, prometheus.CounterValue, float64(stats.WaitCount)) - ch <- prometheus.MustNewConstMetric(c.waitDuration, prometheus.CounterValue, stats.WaitDuration.Seconds()) - ch <- prometheus.MustNewConstMetric(c.maxIdleClosed, prometheus.CounterValue, float64(stats.MaxIdleClosed)) - ch <- prometheus.MustNewConstMetric(c.maxLifetimeClosed, prometheus.CounterValue, float64(stats.MaxLifetimeClosed)) - ch <- prometheus.MustNewConstMetric(c.maxIdleTimeClosed, prometheus.CounterValue, float64(stats.MaxIdleTimeClosed)) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go deleted file mode 100644 index 3aa8d0590..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/collectors/expvar_collector.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package collectors - -import "github.com/prometheus/client_golang/prometheus" - -// NewExpvarCollector returns a newly allocated expvar Collector. -// -// An expvar Collector collects metrics from the expvar interface. It provides a -// quick way to expose numeric values that are already exported via expvar as -// Prometheus metrics. Note that the data models of expvar and Prometheus are -// fundamentally different, and that the expvar Collector is inherently slower -// than native Prometheus metrics. Thus, the expvar Collector is probably great -// for experiments and prototying, but you should seriously consider a more -// direct implementation of Prometheus metrics for monitoring production -// systems. -// -// The exports map has the following meaning: -// -// The keys in the map correspond to expvar keys, i.e. for every expvar key you -// want to export as Prometheus metric, you need an entry in the exports -// map. The descriptor mapped to each key describes how to export the expvar -// value. It defines the name and the help string of the Prometheus metric -// proxying the expvar value. The type will always be Untyped. -// -// For descriptors without variable labels, the expvar value must be a number or -// a bool. The number is then directly exported as the Prometheus sample -// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values -// that are not numbers or bools are silently ignored. 
-// -// If the descriptor has one variable label, the expvar value must be an expvar -// map. The keys in the expvar map become the various values of the one -// Prometheus label. The values in the expvar map must be numbers or bools again -// as above. -// -// For descriptors with more than one variable label, the expvar must be a -// nested expvar map, i.e. where the values of the topmost map are maps again -// etc. until a depth is reached that corresponds to the number of labels. The -// leaves of that structure must be numbers or bools as above to serve as the -// sample values. -// -// Anything that does not fit into the scheme above is silently ignored. -func NewExpvarCollector(exports map[string]*prometheus.Desc) prometheus.Collector { - //nolint:staticcheck // Ignore SA1019 until v2. - return prometheus.NewExpvarCollector(exports) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_go116.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_go116.go deleted file mode 100644 index effc57840..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_go116.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !go1.17 -// +build !go1.17 - -package collectors - -import "github.com/prometheus/client_golang/prometheus" - -// NewGoCollector returns a collector that exports metrics about the current Go -// process. This includes memory stats. To collect those, runtime.ReadMemStats -// is called. This requires to “stop the world”, which usually only happens for -// garbage collection (GC). Take the following implications into account when -// deciding whether to use the Go collector: -// -// 1. The performance impact of stopping the world is the more relevant the more -// frequently metrics are collected. However, with Go1.9 or later the -// stop-the-world time per metrics collection is very short (~25µs) so that the -// performance impact will only matter in rare cases. However, with older Go -// versions, the stop-the-world duration depends on the heap size and can be -// quite significant (~1.7 ms/GiB as per -// https://go-review.googlesource.com/c/go/+/34937). -// -// 2. During an ongoing GC, nothing else can stop the world. Therefore, if the -// metrics collection happens to coincide with GC, it will only complete after -// GC has finished. Usually, GC is fast enough to not cause problems. However, -// with a very large heap, GC might take multiple seconds, which is enough to -// cause scrape timeouts in common setups. To avoid this problem, the Go -// collector will use the memstats from a previous collection if -// runtime.ReadMemStats takes more than 1s. However, if there are no previously -// collected memstats, or their collection is more than 5m ago, the collection -// will block until runtime.ReadMemStats succeeds. 
-// -// NOTE: The problem is solved in Go 1.15, see -// https://github.com/golang/go/issues/19812 for the related Go issue. -func NewGoCollector() prometheus.Collector { - return prometheus.NewGoCollector() -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go deleted file mode 100644 index 246c5ea94..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/collectors/go_collector_latest.go +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build go1.17 -// +build go1.17 - -package collectors - -import ( - "regexp" - - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/internal" -) - -var ( - // MetricsAll allows all the metrics to be collected from Go runtime. - MetricsAll = GoRuntimeMetricsRule{regexp.MustCompile("/.*")} - // MetricsGC allows only GC metrics to be collected from Go runtime. - // e.g. go_gc_cycles_automatic_gc_cycles_total - MetricsGC = GoRuntimeMetricsRule{regexp.MustCompile(`^/gc/.*`)} - // MetricsMemory allows only memory metrics to be collected from Go runtime. - // e.g. go_memory_classes_heap_free_bytes - MetricsMemory = GoRuntimeMetricsRule{regexp.MustCompile(`^/memory/.*`)} - // MetricsScheduler allows only scheduler metrics to be collected from Go runtime. - // e.g. go_sched_goroutines_goroutines - MetricsScheduler = GoRuntimeMetricsRule{regexp.MustCompile(`^/sched/.*`)} -) - -// WithGoCollectorMemStatsMetricsDisabled disables metrics that is gathered in runtime.MemStats structure such as: -// -// go_memstats_alloc_bytes -// go_memstats_alloc_bytes_total -// go_memstats_sys_bytes -// go_memstats_lookups_total -// go_memstats_mallocs_total -// go_memstats_frees_total -// go_memstats_heap_alloc_bytes -// go_memstats_heap_sys_bytes -// go_memstats_heap_idle_bytes -// go_memstats_heap_inuse_bytes -// go_memstats_heap_released_bytes -// go_memstats_heap_objects -// go_memstats_stack_inuse_bytes -// go_memstats_stack_sys_bytes -// go_memstats_mspan_inuse_bytes -// go_memstats_mspan_sys_bytes -// go_memstats_mcache_inuse_bytes -// go_memstats_mcache_sys_bytes -// go_memstats_buck_hash_sys_bytes -// go_memstats_gc_sys_bytes -// go_memstats_other_sys_bytes -// go_memstats_next_gc_bytes -// -// so the metrics known from pre client_golang v1.12.0, -// -// NOTE(bwplotka): The above represents runtime.MemStats statistics, but they are -// actually implemented using new runtime/metrics package. (except skipped go_memstats_gc_cpu_fraction -// -- see https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034 for explanation). -// -// Some users might want to disable this on collector level (although you can use scrape relabelling on Prometheus), -// because similar metrics can be now obtained using WithGoCollectorRuntimeMetrics. 
Note that the semantics of new -// metrics might be different, plus the names can be change over time with different Go version. -// -// NOTE(bwplotka): Changing metric names can be tedious at times as the alerts, recording rules and dashboards have to be adjusted. -// The old metrics are also very useful, with many guides and books written about how to interpret them. -// -// As a result our recommendation would be to stick with MemStats like metrics and enable other runtime/metrics if you are interested -// in advanced insights Go provides. See ExampleGoCollector_WithAdvancedGoMetrics. -func WithGoCollectorMemStatsMetricsDisabled() func(options *internal.GoCollectorOptions) { - return func(o *internal.GoCollectorOptions) { - o.DisableMemStatsLikeMetrics = true - } -} - -// GoRuntimeMetricsRule allow enabling and configuring particular group of runtime/metrics. -// TODO(bwplotka): Consider adding ability to adjust buckets. -type GoRuntimeMetricsRule struct { - // Matcher represents RE2 expression will match the runtime/metrics from https://golang.bg/src/runtime/metrics/description.go - // Use `regexp.MustCompile` or `regexp.Compile` to create this field. - Matcher *regexp.Regexp -} - -// WithGoCollectorRuntimeMetrics allows enabling and configuring particular group of runtime/metrics. -// See the list of metrics https://golang.bg/src/runtime/metrics/description.go (pick the Go version you use there!). -// You can use this option in repeated manner, which will add new rules. The order of rules is important, the last rule -// that matches particular metrics is applied. -func WithGoCollectorRuntimeMetrics(rules ...GoRuntimeMetricsRule) func(options *internal.GoCollectorOptions) { - rs := make([]internal.GoCollectorRule, len(rules)) - for i, r := range rules { - rs[i] = internal.GoCollectorRule{ - Matcher: r.Matcher, - } - } - - return func(o *internal.GoCollectorOptions) { - o.RuntimeMetricRules = append(o.RuntimeMetricRules, rs...) - } -} - -// WithoutGoCollectorRuntimeMetrics allows disabling group of runtime/metrics that you might have added in WithGoCollectorRuntimeMetrics. -// It behaves similarly to WithGoCollectorRuntimeMetrics just with deny-list semantics. -func WithoutGoCollectorRuntimeMetrics(matchers ...*regexp.Regexp) func(options *internal.GoCollectorOptions) { - rs := make([]internal.GoCollectorRule, len(matchers)) - for i, m := range matchers { - rs[i] = internal.GoCollectorRule{ - Matcher: m, - Deny: true, - } - } - - return func(o *internal.GoCollectorOptions) { - o.RuntimeMetricRules = append(o.RuntimeMetricRules, rs...) - } -} - -// GoCollectionOption represents Go collection option flag. -// Deprecated. -type GoCollectionOption uint32 - -const ( - // GoRuntimeMemStatsCollection represents the metrics represented by runtime.MemStats structure. - // Deprecated. Use WithGoCollectorMemStatsMetricsDisabled() function to disable those metrics in the collector. - GoRuntimeMemStatsCollection GoCollectionOption = 1 << iota - // GoRuntimeMetricsCollection is the new set of metrics represented by runtime/metrics package. - // Deprecated. Use WithGoCollectorRuntimeMetrics(GoRuntimeMetricsRule{Matcher: regexp.MustCompile("/.*")}) - // function to enable those metrics in the collector. - GoRuntimeMetricsCollection -) - -// WithGoCollections allows enabling different collections for Go collector on top of base metrics. -// Deprecated. Use WithGoCollectorRuntimeMetrics() and WithGoCollectorMemStatsMetricsDisabled() instead to control metrics. 
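A minimal sketch of the non-deprecated option style described above, combining the variadic NewGoCollector from this file with a couple of the predefined runtime/metrics rules; which groups to enable is workload-specific.

package example

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
)

// newTunedGoCollector keeps the classic MemStats metrics and additionally
// enables the GC and scheduler groups from runtime/metrics.
func newTunedGoCollector() prometheus.Collector {
	return collectors.NewGoCollector(
		collectors.WithGoCollectorRuntimeMetrics(
			collectors.MetricsGC,
			collectors.MetricsScheduler,
		),
	)
}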
-func WithGoCollections(flags GoCollectionOption) func(options *internal.GoCollectorOptions) { - return func(options *internal.GoCollectorOptions) { - if flags&GoRuntimeMemStatsCollection == 0 { - WithGoCollectorMemStatsMetricsDisabled()(options) - } - - if flags&GoRuntimeMetricsCollection != 0 { - WithGoCollectorRuntimeMetrics(GoRuntimeMetricsRule{Matcher: regexp.MustCompile("/.*")})(options) - } - } -} - -// NewGoCollector returns a collector that exports metrics about the current Go -// process using debug.GCStats (base metrics) and runtime/metrics (both in MemStats style and new ones). -func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) prometheus.Collector { - //nolint:staticcheck // Ignore SA1019 until v2. - return prometheus.NewGoCollector(opts...) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectors/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collectors/process_collector.go deleted file mode 100644 index 24558f50a..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/collectors/process_collector.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package collectors - -import "github.com/prometheus/client_golang/prometheus" - -// ProcessCollectorOpts defines the behavior of a process metrics collector -// created with NewProcessCollector. -type ProcessCollectorOpts struct { - // PidFn returns the PID of the process the collector collects metrics - // for. It is called upon each collection. By default, the PID of the - // current process is used, as determined on construction time by - // calling os.Getpid(). - PidFn func() (int, error) - // If non-empty, each of the collected metrics is prefixed by the - // provided string and an underscore ("_"). - Namespace string - // If true, any error encountered during collection is reported as an - // invalid metric (see NewInvalidMetric). Otherwise, errors are ignored - // and the collected metrics will be incomplete. (Possibly, no metrics - // will be collected at all.) While that's usually not desired, it is - // appropriate for the common "mix-in" of process metrics, where process - // metrics are nice to have, but failing to collect them should not - // disrupt the collection of the remaining metrics. - ReportErrors bool -} - -// NewProcessCollector returns a collector which exports the current state of -// process metrics including CPU, memory and file descriptor usage as well as -// the process start time. The detailed behavior is defined by the provided -// ProcessCollectorOpts. The zero value of ProcessCollectorOpts creates a -// collector for the current process with an empty namespace string and no error -// reporting. -// -// The collector only works on operating systems with a Linux-style proc -// filesystem and on Microsoft Windows. On other operating systems, it will not -// collect any metrics. 
-func NewProcessCollector(opts ProcessCollectorOpts) prometheus.Collector { - //nolint:staticcheck // Ignore SA1019 until v2. - return prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{ - PidFn: opts.PidFn, - Namespace: opts.Namespace, - ReportErrors: opts.ReportErrors, - }) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go deleted file mode 100644 index a912b75a0..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go +++ /dev/null @@ -1,328 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "errors" - "math" - "sync/atomic" - "time" - - dto "github.com/prometheus/client_model/go" -) - -// Counter is a Metric that represents a single numerical value that only ever -// goes up. That implies that it cannot be used to count items whose number can -// also go down, e.g. the number of currently running goroutines. Those -// "counters" are represented by Gauges. -// -// A Counter is typically used to count requests served, tasks completed, errors -// occurred, etc. -// -// To create Counter instances, use NewCounter. -type Counter interface { - Metric - Collector - - // Inc increments the counter by 1. Use Add to increment it by arbitrary - // non-negative values. - Inc() - // Add adds the given value to the counter. It panics if the value is < - // 0. - Add(float64) -} - -// ExemplarAdder is implemented by Counters that offer the option of adding a -// value to the Counter together with an exemplar. Its AddWithExemplar method -// works like the Add method of the Counter interface but also replaces the -// currently saved exemplar (if any) with a new one, created from the provided -// value, the current time as timestamp, and the provided labels. Empty Labels -// will lead to a valid (label-less) exemplar. But if Labels is nil, the current -// exemplar is left in place. AddWithExemplar panics if the value is < 0, if any -// of the provided labels are invalid, or if the provided labels contain more -// than 128 runes in total. -type ExemplarAdder interface { - AddWithExemplar(value float64, exemplar Labels) -} - -// CounterOpts is an alias for Opts. See there for doc comments. -type CounterOpts Opts - -// NewCounter creates a new Counter based on the provided CounterOpts. -// -// The returned implementation also implements ExemplarAdder. It is safe to -// perform the corresponding type assertion. -// -// The returned implementation tracks the counter value in two separate -// variables, a float64 and a uint64. The latter is used to track calls of the -// Inc method and calls of the Add method with a value that can be represented -// as a uint64. This allows atomic increments of the counter with optimal -// performance. (It is common to have an Inc call in very hot execution paths.) -// Both internal tracking values are added up in the Write method. 
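Stepping back, a typical registry built from the collectors package removed here looked roughly like the following; the promhttp exposition wiring is an assumption for context and is not part of this diff.

package example

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// newMetricsHandler builds a registry with the usual build-info, Go runtime,
// and process collectors and exposes it over HTTP.
func newMetricsHandler() http.Handler {
	reg := prometheus.NewRegistry()
	reg.MustRegister(
		collectors.NewBuildInfoCollector(),
		collectors.NewGoCollector(),
		collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
	)
	return promhttp.HandlerFor(reg, promhttp.HandlerOpts{})
}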
This has to -// be taken into account when it comes to precision and overflow behavior. -func NewCounter(opts CounterOpts) Counter { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ) - result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: time.Now} - result.init(result) // Init self-collection. - return result -} - -type counter struct { - // valBits contains the bits of the represented float64 value, while - // valInt stores values that are exact integers. Both have to go first - // in the struct to guarantee alignment for atomic operations. - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG - valBits uint64 - valInt uint64 - - selfCollector - desc *Desc - - labelPairs []*dto.LabelPair - exemplar atomic.Value // Containing nil or a *dto.Exemplar. - - now func() time.Time // To mock out time.Now() for testing. -} - -func (c *counter) Desc() *Desc { - return c.desc -} - -func (c *counter) Add(v float64) { - if v < 0 { - panic(errors.New("counter cannot decrease in value")) - } - - ival := uint64(v) - if float64(ival) == v { - atomic.AddUint64(&c.valInt, ival) - return - } - - for { - oldBits := atomic.LoadUint64(&c.valBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + v) - if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) { - return - } - } -} - -func (c *counter) AddWithExemplar(v float64, e Labels) { - c.Add(v) - c.updateExemplar(v, e) -} - -func (c *counter) Inc() { - atomic.AddUint64(&c.valInt, 1) -} - -func (c *counter) get() float64 { - fval := math.Float64frombits(atomic.LoadUint64(&c.valBits)) - ival := atomic.LoadUint64(&c.valInt) - return fval + float64(ival) -} - -func (c *counter) Write(out *dto.Metric) error { - // Read the Exemplar first and the value second. This is to avoid a race condition - // where users see an exemplar for a not-yet-existing observation. - var exemplar *dto.Exemplar - if e := c.exemplar.Load(); e != nil { - exemplar = e.(*dto.Exemplar) - } - val := c.get() - - return populateMetric(CounterValue, val, c.labelPairs, exemplar, out) -} - -func (c *counter) updateExemplar(v float64, l Labels) { - if l == nil { - return - } - e, err := newExemplar(v, c.now(), l) - if err != nil { - panic(err) - } - c.exemplar.Store(e) -} - -// CounterVec is a Collector that bundles a set of Counters that all share the -// same Desc, but have different values for their variable labels. This is used -// if you want to count the same thing partitioned by various dimensions -// (e.g. number of HTTP requests, partitioned by response code and -// method). Create instances with NewCounterVec. -type CounterVec struct { - *MetricVec -} - -// NewCounterVec creates a new CounterVec based on the provided CounterOpts and -// partitioned by the given label names. -func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - labelNames, - opts.ConstLabels, - ) - return &CounterVec{ - MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { - if len(lvs) != len(desc.variableLabels) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) - } - result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: time.Now} - result.init(result) // Init self-collection. - return result - }), - } -} - -// GetMetricWithLabelValues returns the Counter for the given slice of label -// values (same order as the variable labels in Desc). 
If that combination of -// label values is accessed for the first time, a new Counter is created. -// -// It is possible to call this method without using the returned Counter to only -// create the new Counter but leave it at its starting value 0. See also the -// SummaryVec example. -// -// Keeping the Counter for later use is possible (and should be considered if -// performance is critical), but keep in mind that Reset, DeleteLabelValues and -// Delete can be used to delete the Counter from the CounterVec. In that case, -// the Counter will still exist, but it will not be exported anymore, even if a -// Counter with the same label values is created later. -// -// An error is returned if the number of label values is not the same as the -// number of variable labels in Desc (minus any curried labels). -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as -// an alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// See also the GaugeVec example. -func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { - metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...) - if metric != nil { - return metric.(Counter), err - } - return nil, err -} - -// GetMetricWith returns the Counter for the given Labels map (the label names -// must match those of the variable labels in Desc). If that label map is -// accessed for the first time, a new Counter is created. Implications of -// creating a Counter without using it and keeping the Counter for later use are -// the same as for GetMetricWithLabelValues. -// -// An error is returned if the number and names of the Labels are inconsistent -// with those of the variable labels in Desc (minus any curried labels). -// -// This method is used for the same purpose as -// GetMetricWithLabelValues(...string). See there for pros and cons of the two -// methods. -func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) { - metric, err := v.MetricVec.GetMetricWith(labels) - if metric != nil { - return metric.(Counter), err - } - return nil, err -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. Not returning an -// error allows shortcuts like -// -// myVec.WithLabelValues("404", "GET").Add(42) -func (v *CounterVec) WithLabelValues(lvs ...string) Counter { - c, err := v.GetMetricWithLabelValues(lvs...) - if err != nil { - panic(err) - } - return c -} - -// With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. Not returning an error allows shortcuts like -// -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) -func (v *CounterVec) With(labels Labels) Counter { - c, err := v.GetMetricWith(labels) - if err != nil { - panic(err) - } - return c -} - -// CurryWith returns a vector curried with the provided labels, i.e. the -// returned vector has those labels pre-set for all labeled operations performed -// on it. The cardinality of the curried vector is reduced accordingly. The -// order of the remaining labels stays the same (just with the curried labels -// taken out of the sequence – which is relevant for the -// (GetMetric)WithLabelValues methods). 
It is possible to curry a curried -// vector, but only with labels not yet used for currying before. -// -// The metrics contained in the CounterVec are shared between the curried and -// uncurried vectors. They are just accessed differently. Curried and uncurried -// vectors behave identically in terms of collection. Only one must be -// registered with a given registry (usually the uncurried version). The Reset -// method deletes all metrics, even if called on a curried vector. -func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) { - vec, err := v.MetricVec.CurryWith(labels) - if vec != nil { - return &CounterVec{vec}, err - } - return nil, err -} - -// MustCurryWith works as CurryWith but panics where CurryWith would have -// returned an error. -func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec { - vec, err := v.CurryWith(labels) - if err != nil { - panic(err) - } - return vec -} - -// CounterFunc is a Counter whose value is determined at collect time by calling a -// provided function. -// -// To create CounterFunc instances, use NewCounterFunc. -type CounterFunc interface { - Metric - Collector -} - -// NewCounterFunc creates a new CounterFunc based on the provided -// CounterOpts. The value reported is determined by calling the given function -// from within the Write method. Take into account that metric collection may -// happen concurrently. If that results in concurrent calls to Write, like in -// the case where a CounterFunc is directly registered with Prometheus, the -// provided function must be concurrency-safe. The function should also honor -// the contract for a Counter (values only go up, not down), but compliance will -// not be checked. -// -// Check out the ExampleGaugeFunc examples for the similar GaugeFunc. -func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc { - return newValueFunc(NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), CounterValue, function) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go deleted file mode 100644 index 8bc5e44e2..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ /dev/null @@ -1,189 +0,0 @@ -// Copyright 2016 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "errors" - "fmt" - "sort" - "strings" - - "github.com/cespare/xxhash/v2" - - "github.com/prometheus/client_golang/prometheus/internal" - - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - "github.com/prometheus/common/model" - - dto "github.com/prometheus/client_model/go" -) - -// Desc is the descriptor used by every Prometheus Metric. It is essentially -// the immutable meta-data of a Metric. The normal Metric implementations -// included in this package manage their Desc under the hood. 
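A short sketch of the CounterVec and currying behaviour described above, again assuming the client_golang API being removed; metric and label names are illustrative.

package example

import "github.com/prometheus/client_golang/prometheus"

func countRequests() {
	httpReqs := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "myapp_http_requests_total",
			Help: "HTTP requests partitioned by status code and method.",
		},
		[]string{"code", "method"},
	)

	// Shortcut that panics on a label mismatch instead of returning an error.
	httpReqs.WithLabelValues("404", "GET").Inc()

	// Currying pre-sets "method"; the returned vector only needs "code".
	getOnly := httpReqs.MustCurryWith(prometheus.Labels{"method": "GET"})
	getOnly.WithLabelValues("200").Add(42)
}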
Users only have to -// deal with Desc if they use advanced features like the ExpvarCollector or -// custom Collectors and Metrics. -// -// Descriptors registered with the same registry have to fulfill certain -// consistency and uniqueness criteria if they share the same fully-qualified -// name: They must have the same help string and the same label names (aka label -// dimensions) in each, constLabels and variableLabels, but they must differ in -// the values of the constLabels. -// -// Descriptors that share the same fully-qualified names and the same label -// values of their constLabels are considered equal. -// -// Use NewDesc to create new Desc instances. -type Desc struct { - // fqName has been built from Namespace, Subsystem, and Name. - fqName string - // help provides some helpful information about this metric. - help string - // constLabelPairs contains precalculated DTO label pairs based on - // the constant labels. - constLabelPairs []*dto.LabelPair - // variableLabels contains names of labels for which the metric - // maintains variable values. - variableLabels []string - // id is a hash of the values of the ConstLabels and fqName. This - // must be unique among all registered descriptors and can therefore be - // used as an identifier of the descriptor. - id uint64 - // dimHash is a hash of the label names (preset and variable) and the - // Help string. Each Desc with the same fqName must have the same - // dimHash. - dimHash uint64 - // err is an error that occurred during construction. It is reported on - // registration time. - err error -} - -// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc -// and will be reported on registration time. variableLabels and constLabels can -// be nil if no such labels should be set. fqName must not be empty. -// -// variableLabels only contain the label names. Their label values are variable -// and therefore not part of the Desc. (They are managed within the Metric.) -// -// For constLabels, the label values are constant. Therefore, they are fully -// specified in the Desc. See the Collector example for a usage pattern. -func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc { - d := &Desc{ - fqName: fqName, - help: help, - variableLabels: variableLabels, - } - if !model.IsValidMetricName(model.LabelValue(fqName)) { - d.err = fmt.Errorf("%q is not a valid metric name", fqName) - return d - } - // labelValues contains the label values of const labels (in order of - // their sorted label names) plus the fqName (at position 0). - labelValues := make([]string, 1, len(constLabels)+1) - labelValues[0] = fqName - labelNames := make([]string, 0, len(constLabels)+len(variableLabels)) - labelNameSet := map[string]struct{}{} - // First add only the const label names and sort them... - for labelName := range constLabels { - if !checkLabelName(labelName) { - d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName) - return d - } - labelNames = append(labelNames, labelName) - labelNameSet[labelName] = struct{}{} - } - sort.Strings(labelNames) - // ... so that we can now add const label values in the order of their names. - for _, labelName := range labelNames { - labelValues = append(labelValues, constLabels[labelName]) - } - // Validate the const label values. They can't have a wrong cardinality, so - // use in len(labelValues) as expectedNumberOfValues. 
- if err := validateLabelValues(labelValues, len(labelValues)); err != nil { - d.err = err - return d - } - // Now add the variable label names, but prefix them with something that - // cannot be in a regular label name. That prevents matching the label - // dimension with a different mix between preset and variable labels. - for _, labelName := range variableLabels { - if !checkLabelName(labelName) { - d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName) - return d - } - labelNames = append(labelNames, "$"+labelName) - labelNameSet[labelName] = struct{}{} - } - if len(labelNames) != len(labelNameSet) { - d.err = errors.New("duplicate label names") - return d - } - - xxh := xxhash.New() - for _, val := range labelValues { - xxh.WriteString(val) - xxh.Write(separatorByteSlice) - } - d.id = xxh.Sum64() - // Sort labelNames so that order doesn't matter for the hash. - sort.Strings(labelNames) - // Now hash together (in this order) the help string and the sorted - // label names. - xxh.Reset() - xxh.WriteString(help) - xxh.Write(separatorByteSlice) - for _, labelName := range labelNames { - xxh.WriteString(labelName) - xxh.Write(separatorByteSlice) - } - d.dimHash = xxh.Sum64() - - d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels)) - for n, v := range constLabels { - d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{ - Name: proto.String(n), - Value: proto.String(v), - }) - } - sort.Sort(internal.LabelPairSorter(d.constLabelPairs)) - return d -} - -// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the -// provided error set. If a collector returning such a descriptor is registered, -// registration will fail with the provided error. NewInvalidDesc can be used by -// a Collector to signal inability to describe itself. -func NewInvalidDesc(err error) *Desc { - return &Desc{ - err: err, - } -} - -func (d *Desc) String() string { - lpStrings := make([]string, 0, len(d.constLabelPairs)) - for _, lp := range d.constLabelPairs { - lpStrings = append( - lpStrings, - fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()), - ) - } - return fmt.Sprintf( - "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}", - d.fqName, - d.help, - strings.Join(lpStrings, ","), - d.variableLabels, - ) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go deleted file mode 100644 index 811072cbd..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package prometheus is the core instrumentation package. It provides metrics -// primitives to instrument code for monitoring. It also offers a registry for -// metrics. Sub-packages allow to expose the registered metrics via HTTP -// (package promhttp) or push them to a Pushgateway (package push). 
There is -// also a sub-package promauto, which provides metrics constructors with -// automatic registration. -// -// All exported functions and methods are safe to be used concurrently unless -// specified otherwise. -// -// # A Basic Example -// -// As a starting point, a very basic usage example: -// -// package main -// -// import ( -// "log" -// "net/http" -// -// "github.com/prometheus/client_golang/prometheus" -// "github.com/prometheus/client_golang/prometheus/promhttp" -// ) -// -// type metrics struct { -// cpuTemp prometheus.Gauge -// hdFailures *prometheus.CounterVec -// } -// -// func NewMetrics(reg prometheus.Registerer) *metrics { -// m := &metrics{ -// cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{ -// Name: "cpu_temperature_celsius", -// Help: "Current temperature of the CPU.", -// }), -// hdFailures: prometheus.NewCounterVec( -// prometheus.CounterOpts{ -// Name: "hd_errors_total", -// Help: "Number of hard-disk errors.", -// }, -// []string{"device"}, -// ), -// } -// reg.MustRegister(m.cpuTemp) -// reg.MustRegister(m.hdFailures) -// return m -// } -// -// func main() { -// // Create a non-global registry. -// reg := prometheus.NewRegistry() -// -// // Create new metrics and register them using the custom registry. -// m := NewMetrics(reg) -// // Set values for the new created metrics. -// m.cpuTemp.Set(65.3) -// m.hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc() -// -// // Expose metrics and custom registry via an HTTP server -// // using the HandleFor function. "/metrics" is the usual endpoint for that. -// http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg})) -// log.Fatal(http.ListenAndServe(":8080", nil)) -// } -// -// This is a complete program that exports two metrics, a Gauge and a Counter, -// the latter with a label attached to turn it into a (one-dimensional) vector. -// It register the metrics using a custom registry and exposes them via an HTTP server -// on the /metrics endpoint. -// -// # Metrics -// -// The number of exported identifiers in this package might appear a bit -// overwhelming. However, in addition to the basic plumbing shown in the example -// above, you only need to understand the different metric types and their -// vector versions for basic usage. Furthermore, if you are not concerned with -// fine-grained control of when and how to register metrics with the registry, -// have a look at the promauto package, which will effectively allow you to -// ignore registration altogether in simple cases. -// -// Above, you have already touched the Counter and the Gauge. There are two more -// advanced metric types: the Summary and Histogram. A more thorough description -// of those four metric types can be found in the Prometheus docs: -// https://prometheus.io/docs/concepts/metric_types/ -// -// In addition to the fundamental metric types Gauge, Counter, Summary, and -// Histogram, a very important part of the Prometheus data model is the -// partitioning of samples along dimensions called labels, which results in -// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec, -// and HistogramVec. -// -// While only the fundamental metric types implement the Metric interface, both -// the metrics and their vector versions implement the Collector interface. A -// Collector manages the collection of a number of Metrics, but for convenience, -// a Metric can also “collect itself”. 
Note that Gauge, Counter, Summary, and -// Histogram are interfaces themselves while GaugeVec, CounterVec, SummaryVec, -// and HistogramVec are not. -// -// To create instances of Metrics and their vector versions, you need a suitable -// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts. -// -// # Custom Collectors and constant Metrics -// -// While you could create your own implementations of Metric, most likely you -// will only ever implement the Collector interface on your own. At a first -// glance, a custom Collector seems handy to bundle Metrics for common -// registration (with the prime example of the different metric vectors above, -// which bundle all the metrics of the same name but with different labels). -// -// There is a more involved use case, too: If you already have metrics -// available, created outside of the Prometheus context, you don't need the -// interface of the various Metric types. You essentially want to mirror the -// existing numbers into Prometheus Metrics during collection. An own -// implementation of the Collector interface is perfect for that. You can create -// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and -// NewConstSummary (and their respective Must… versions). NewConstMetric is used -// for all metric types with just a float64 as their value: Counter, Gauge, and -// a special “type” called Untyped. Use the latter if you are not sure if the -// mirrored metric is a Counter or a Gauge. Creation of the Metric instance -// happens in the Collect method. The Describe method has to return separate -// Desc instances, representative of the “throw-away” metrics to be created -// later. NewDesc comes in handy to create those Desc instances. Alternatively, -// you could return no Desc at all, which will mark the Collector “unchecked”. -// No checks are performed at registration time, but metric consistency will -// still be ensured at scrape time, i.e. any inconsistencies will lead to scrape -// errors. Thus, with unchecked Collectors, the responsibility to not collect -// metrics that lead to inconsistencies in the total scrape result lies with the -// implementer of the Collector. While this is not a desirable state, it is -// sometimes necessary. The typical use case is a situation where the exact -// metrics to be returned by a Collector cannot be predicted at registration -// time, but the implementer has sufficient knowledge of the whole system to -// guarantee metric consistency. -// -// The Collector example illustrates the use case. You can also look at the -// source code of the processCollector (mirroring process metrics), the -// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar -// metrics) as examples that are used in this package itself. -// -// If you just need to call a function to get a single float value to collect as -// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting -// shortcuts. -// -// # Advanced Uses of the Registry -// -// While MustRegister is the by far most common way of registering a Collector, -// sometimes you might want to handle the errors the registration might cause. -// As suggested by the name, MustRegister panics if an error occurs. With the -// Register function, the error is returned and can be handled. -// -// An error is returned if the registered Collector is incompatible or -// inconsistent with already registered metrics. 
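A minimal sketch of the custom Collector with constant metrics pattern described above, assuming the same API; queueCollector and its length callback are hypothetical names used only for this example.

package example

import "github.com/prometheus/client_golang/prometheus"

// queueCollector mirrors an externally maintained number into a metric that is
// created on the fly at collect time.
type queueCollector struct {
	lengthDesc *prometheus.Desc
	length     func() float64
}

func newQueueCollector(length func() float64) prometheus.Collector {
	return &queueCollector{
		lengthDesc: prometheus.NewDesc(
			"myapp_queue_length",
			"Current number of items in the work queue.",
			nil, nil,
		),
		length: length,
	}
}

// Describe sends the one Desc this collector can ever produce, keeping the
// collector "checked" at registration time.
func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.lengthDesc
}

// Collect creates a throw-away constant metric from the current value.
func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(c.lengthDesc, prometheus.GaugeValue, c.length())
}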
The registry aims for -// consistency of the collected metrics according to the Prometheus data model. -// Inconsistencies are ideally detected at registration time, not at collect -// time. The former will usually be detected at start-up time of a program, -// while the latter will only happen at scrape time, possibly not even on the -// first scrape if the inconsistency only becomes relevant later. That is the -// main reason why a Collector and a Metric have to describe themselves to the -// registry. -// -// So far, everything we did operated on the so-called default registry, as it -// can be found in the global DefaultRegisterer variable. With NewRegistry, you -// can create a custom registry, or you can even implement the Registerer or -// Gatherer interfaces yourself. The methods Register and Unregister work in the -// same way on a custom registry as the global functions Register and Unregister -// on the default registry. -// -// There are a number of uses for custom registries: You can use registries with -// special properties, see NewPedanticRegistry. You can avoid global state, as -// it is imposed by the DefaultRegisterer. You can use multiple registries at -// the same time to expose different metrics in different ways. You can use -// separate registries for testing purposes. -// -// Also note that the DefaultRegisterer comes registered with a Collector for Go -// runtime metrics (via NewGoCollector) and a Collector for process metrics (via -// NewProcessCollector). With a custom registry, you are in control and decide -// yourself about the Collectors to register. -// -// # HTTP Exposition -// -// The Registry implements the Gatherer interface. The caller of the Gather -// method can then expose the gathered metrics in some way. Usually, the metrics -// are served via HTTP on the /metrics endpoint. That's happening in the example -// above. The tools to expose metrics via HTTP are in the promhttp sub-package. -// -// # Pushing to the Pushgateway -// -// Function for pushing to the Pushgateway can be found in the push sub-package. -// -// # Graphite Bridge -// -// Functions and examples to push metrics from a Gatherer to Graphite can be -// found in the graphite sub-package. -// -// # Other Means of Exposition -// -// More ways of exposing metrics can easily be added by following the approaches -// of the existing implementations. -package prometheus diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go deleted file mode 100644 index c41ab37f3..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "encoding/json" - "expvar" -) - -type expvarCollector struct { - exports map[string]*Desc -} - -// NewExpvarCollector is the obsolete version of collectors.NewExpvarCollector. 
-// See there for documentation. -// -// Deprecated: Use collectors.NewExpvarCollector instead. -func NewExpvarCollector(exports map[string]*Desc) Collector { - return &expvarCollector{ - exports: exports, - } -} - -// Describe implements Collector. -func (e *expvarCollector) Describe(ch chan<- *Desc) { - for _, desc := range e.exports { - ch <- desc - } -} - -// Collect implements Collector. -func (e *expvarCollector) Collect(ch chan<- Metric) { - for name, desc := range e.exports { - var m Metric - expVar := expvar.Get(name) - if expVar == nil { - continue - } - var v interface{} - labels := make([]string, len(desc.variableLabels)) - if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil { - ch <- NewInvalidMetric(desc, err) - continue - } - var processValue func(v interface{}, i int) - processValue = func(v interface{}, i int) { - if i >= len(labels) { - copiedLabels := append(make([]string, 0, len(labels)), labels...) - switch v := v.(type) { - case float64: - m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...) - case bool: - if v { - m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...) - } else { - m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...) - } - default: - return - } - ch <- m - return - } - vm, ok := v.(map[string]interface{}) - if !ok { - return - } - for lv, val := range vm { - labels[i] = lv - processValue(val, i+1) - } - } - processValue(v, 0) - } -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go deleted file mode 100644 index 3d383a735..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -// Inline and byte-free variant of hash/fnv's fnv64a. - -const ( - offset64 = 14695981039346656037 - prime64 = 1099511628211 -) - -// hashNew initializies a new fnv64a hash value. -func hashNew() uint64 { - return offset64 -} - -// hashAdd adds a string to a fnv64a hash value, returning the updated hash. -func hashAdd(h uint64, s string) uint64 { - for i := 0; i < len(s); i++ { - h ^= uint64(s[i]) - h *= prime64 - } - return h -} - -// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash. -func hashAddByte(h uint64, b byte) uint64 { - h ^= uint64(b) - h *= prime64 - return h -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go deleted file mode 100644 index 21271a5bb..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go +++ /dev/null @@ -1,291 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "math" - "sync/atomic" - "time" - - dto "github.com/prometheus/client_model/go" -) - -// Gauge is a Metric that represents a single numerical value that can -// arbitrarily go up and down. -// -// A Gauge is typically used for measured values like temperatures or current -// memory usage, but also "counts" that can go up and down, like the number of -// running goroutines. -// -// To create Gauge instances, use NewGauge. -type Gauge interface { - Metric - Collector - - // Set sets the Gauge to an arbitrary value. - Set(float64) - // Inc increments the Gauge by 1. Use Add to increment it by arbitrary - // values. - Inc() - // Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary - // values. - Dec() - // Add adds the given value to the Gauge. (The value can be negative, - // resulting in a decrease of the Gauge.) - Add(float64) - // Sub subtracts the given value from the Gauge. (The value can be - // negative, resulting in an increase of the Gauge.) - Sub(float64) - - // SetToCurrentTime sets the Gauge to the current Unix time in seconds. - SetToCurrentTime() -} - -// GaugeOpts is an alias for Opts. See there for doc comments. -type GaugeOpts Opts - -// NewGauge creates a new Gauge based on the provided GaugeOpts. -// -// The returned implementation is optimized for a fast Set method. If you have a -// choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick -// the former. For example, the Inc method of the returned Gauge is slower than -// the Inc method of a Counter returned by NewCounter. This matches the typical -// scenarios for Gauges and Counters, where the former tends to be Set-heavy and -// the latter Inc-heavy. -func NewGauge(opts GaugeOpts) Gauge { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ) - result := &gauge{desc: desc, labelPairs: desc.constLabelPairs} - result.init(result) // Init self-collection. - return result -} - -type gauge struct { - // valBits contains the bits of the represented float64 value. It has - // to go first in the struct to guarantee alignment for atomic - // operations. 
http://golang.org/pkg/sync/atomic/#pkg-note-BUG - valBits uint64 - - selfCollector - - desc *Desc - labelPairs []*dto.LabelPair -} - -func (g *gauge) Desc() *Desc { - return g.desc -} - -func (g *gauge) Set(val float64) { - atomic.StoreUint64(&g.valBits, math.Float64bits(val)) -} - -func (g *gauge) SetToCurrentTime() { - g.Set(float64(time.Now().UnixNano()) / 1e9) -} - -func (g *gauge) Inc() { - g.Add(1) -} - -func (g *gauge) Dec() { - g.Add(-1) -} - -func (g *gauge) Add(val float64) { - for { - oldBits := atomic.LoadUint64(&g.valBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + val) - if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) { - return - } - } -} - -func (g *gauge) Sub(val float64) { - g.Add(val * -1) -} - -func (g *gauge) Write(out *dto.Metric) error { - val := math.Float64frombits(atomic.LoadUint64(&g.valBits)) - return populateMetric(GaugeValue, val, g.labelPairs, nil, out) -} - -// GaugeVec is a Collector that bundles a set of Gauges that all share the same -// Desc, but have different values for their variable labels. This is used if -// you want to count the same thing partitioned by various dimensions -// (e.g. number of operations queued, partitioned by user and operation -// type). Create instances with NewGaugeVec. -type GaugeVec struct { - *MetricVec -} - -// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and -// partitioned by the given label names. -func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - labelNames, - opts.ConstLabels, - ) - return &GaugeVec{ - MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { - if len(lvs) != len(desc.variableLabels) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) - } - result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)} - result.init(result) // Init self-collection. - return result - }), - } -} - -// GetMetricWithLabelValues returns the Gauge for the given slice of label -// values (same order as the variable labels in Desc). If that combination of -// label values is accessed for the first time, a new Gauge is created. -// -// It is possible to call this method without using the returned Gauge to only -// create the new Gauge but leave it at its starting value 0. See also the -// SummaryVec example. -// -// Keeping the Gauge for later use is possible (and should be considered if -// performance is critical), but keep in mind that Reset, DeleteLabelValues and -// Delete can be used to delete the Gauge from the GaugeVec. In that case, the -// Gauge will still exist, but it will not be exported anymore, even if a -// Gauge with the same label values is created later. See also the CounterVec -// example. -// -// An error is returned if the number of label values is not the same as the -// number of variable labels in Desc (minus any curried labels). -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as -// an alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { - metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...) 
- if metric != nil { - return metric.(Gauge), err - } - return nil, err -} - -// GetMetricWith returns the Gauge for the given Labels map (the label names -// must match those of the variable labels in Desc). If that label map is -// accessed for the first time, a new Gauge is created. Implications of -// creating a Gauge without using it and keeping the Gauge for later use are -// the same as for GetMetricWithLabelValues. -// -// An error is returned if the number and names of the Labels are inconsistent -// with those of the variable labels in Desc (minus any curried labels). -// -// This method is used for the same purpose as -// GetMetricWithLabelValues(...string). See there for pros and cons of the two -// methods. -func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { - metric, err := v.MetricVec.GetMetricWith(labels) - if metric != nil { - return metric.(Gauge), err - } - return nil, err -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. Not returning an -// error allows shortcuts like -// -// myVec.WithLabelValues("404", "GET").Add(42) -func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge { - g, err := v.GetMetricWithLabelValues(lvs...) - if err != nil { - panic(err) - } - return g -} - -// With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. Not returning an error allows shortcuts like -// -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) -func (v *GaugeVec) With(labels Labels) Gauge { - g, err := v.GetMetricWith(labels) - if err != nil { - panic(err) - } - return g -} - -// CurryWith returns a vector curried with the provided labels, i.e. the -// returned vector has those labels pre-set for all labeled operations performed -// on it. The cardinality of the curried vector is reduced accordingly. The -// order of the remaining labels stays the same (just with the curried labels -// taken out of the sequence – which is relevant for the -// (GetMetric)WithLabelValues methods). It is possible to curry a curried -// vector, but only with labels not yet used for currying before. -// -// The metrics contained in the GaugeVec are shared between the curried and -// uncurried vectors. They are just accessed differently. Curried and uncurried -// vectors behave identically in terms of collection. Only one must be -// registered with a given registry (usually the uncurried version). The Reset -// method deletes all metrics, even if called on a curried vector. -func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) { - vec, err := v.MetricVec.CurryWith(labels) - if vec != nil { - return &GaugeVec{vec}, err - } - return nil, err -} - -// MustCurryWith works as CurryWith but panics where CurryWith would have -// returned an error. -func (v *GaugeVec) MustCurryWith(labels Labels) *GaugeVec { - vec, err := v.CurryWith(labels) - if err != nil { - panic(err) - } - return vec -} - -// GaugeFunc is a Gauge whose value is determined at collect time by calling a -// provided function. -// -// To create GaugeFunc instances, use NewGaugeFunc. -type GaugeFunc interface { - Metric - Collector -} - -// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The -// value reported is determined by calling the given function from within the -// Write method. Take into account that metric collection may happen -// concurrently. Therefore, it must be safe to call the provided function -// concurrently. 
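A small sketch of typical Gauge and GaugeFunc usage as described in the doc comments above, assuming the same client_golang API; metric names are made up.

package example

import (
	"runtime"

	"github.com/prometheus/client_golang/prometheus"
)

func registerGauges(reg prometheus.Registerer) {
	queueDepth := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "myapp_queue_depth",
		Help: "Current depth of the work queue.",
	})
	lastSync := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "myapp_last_sync_timestamp_seconds",
		Help: "Unix time of the last completed sync.",
	})
	// GaugeFunc samples its value at collect time; the function must be safe
	// for concurrent calls.
	goroutines := prometheus.NewGaugeFunc(prometheus.GaugeOpts{
		Name: "myapp_goroutines",
		Help: "Number of goroutines, sampled at scrape time.",
	}, func() float64 { return float64(runtime.NumGoroutine()) })

	reg.MustRegister(queueDepth, lastSync, goroutines)

	queueDepth.Set(12) // Set is the optimized path for gauges
	queueDepth.Inc()   // gauges can move up...
	queueDepth.Sub(3)  // ...and down
	lastSync.SetToCurrentTime()
}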
-// -// NewGaugeFunc is a good way to create an “info” style metric with a constant -// value of 1. Example: -// https://github.com/prometheus/common/blob/8558a5b7db3c84fa38b4766966059a7bd5bfa2ee/version/info.go#L36-L56 -func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc { - return newValueFunc(NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), GaugeValue, function) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go b/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go deleted file mode 100644 index 614fd61be..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !js || wasm -// +build !js wasm - -package prometheus - -import "os" - -func getPIDFn() func() (int, error) { - pid := os.Getpid() - return func() (int, error) { - return pid, nil - } -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go b/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go deleted file mode 100644 index eaf8059ee..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build js && !wasm -// +build js,!wasm - -package prometheus - -func getPIDFn() func() (int, error) { - return func() (int, error) { - return 1, nil - } -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go deleted file mode 100644 index ad9a71a5e..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go +++ /dev/null @@ -1,281 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "runtime" - "runtime/debug" - "time" -) - -// goRuntimeMemStats provides the metrics initially provided by runtime.ReadMemStats. -// From Go 1.17 those similar (and better) statistics are provided by runtime/metrics, so -// while eval closure works on runtime.MemStats, the struct from Go 1.17+ is -// populated using runtime/metrics. -func goRuntimeMemStats() memStatsMetrics { - return memStatsMetrics{ - { - desc: NewDesc( - memstatNamespace("alloc_bytes"), - "Number of bytes allocated and still in use.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("alloc_bytes_total"), - "Total number of bytes allocated, even if freed.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) }, - valType: CounterValue, - }, { - desc: NewDesc( - memstatNamespace("sys_bytes"), - "Number of bytes obtained from system.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("lookups_total"), - "Total number of pointer lookups.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) }, - valType: CounterValue, - }, { - desc: NewDesc( - memstatNamespace("mallocs_total"), - "Total number of mallocs.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) }, - valType: CounterValue, - }, { - desc: NewDesc( - memstatNamespace("frees_total"), - "Total number of frees.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) }, - valType: CounterValue, - }, { - desc: NewDesc( - memstatNamespace("heap_alloc_bytes"), - "Number of heap bytes allocated and still in use.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("heap_sys_bytes"), - "Number of heap bytes obtained from system.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("heap_idle_bytes"), - "Number of heap bytes waiting to be used.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("heap_inuse_bytes"), - "Number of heap bytes that are in use.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("heap_released_bytes"), - "Number of heap bytes released to OS.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("heap_objects"), - "Number of allocated objects.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("stack_inuse_bytes"), - "Number of bytes in use by the stack allocator.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("stack_sys_bytes"), - "Number of bytes obtained from system for stack allocator.", - 
nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("mspan_inuse_bytes"), - "Number of bytes in use by mspan structures.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("mspan_sys_bytes"), - "Number of bytes used for mspan structures obtained from system.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("mcache_inuse_bytes"), - "Number of bytes in use by mcache structures.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("mcache_sys_bytes"), - "Number of bytes used for mcache structures obtained from system.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("buck_hash_sys_bytes"), - "Number of bytes used by the profiling bucket hash table.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("gc_sys_bytes"), - "Number of bytes used for garbage collection system metadata.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("other_sys_bytes"), - "Number of bytes used for other system allocations.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("next_gc_bytes"), - "Number of heap bytes when next garbage collection will take place.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) }, - valType: GaugeValue, - }, - } -} - -type baseGoCollector struct { - goroutinesDesc *Desc - threadsDesc *Desc - gcDesc *Desc - gcLastTimeDesc *Desc - goInfoDesc *Desc -} - -func newBaseGoCollector() baseGoCollector { - return baseGoCollector{ - goroutinesDesc: NewDesc( - "go_goroutines", - "Number of goroutines that currently exist.", - nil, nil), - threadsDesc: NewDesc( - "go_threads", - "Number of OS threads created.", - nil, nil), - gcDesc: NewDesc( - "go_gc_duration_seconds", - "A summary of the pause duration of garbage collection cycles.", - nil, nil), - gcLastTimeDesc: NewDesc( - "go_memstats_last_gc_time_seconds", - "Number of seconds since 1970 of last garbage collection.", - nil, nil), - goInfoDesc: NewDesc( - "go_info", - "Information about the Go environment.", - nil, Labels{"version": runtime.Version()}), - } -} - -// Describe returns all descriptions of the collector. -func (c *baseGoCollector) Describe(ch chan<- *Desc) { - ch <- c.goroutinesDesc - ch <- c.threadsDesc - ch <- c.gcDesc - ch <- c.gcLastTimeDesc - ch <- c.goInfoDesc -} - -// Collect returns the current state of all metrics of the collector. 
-func (c *baseGoCollector) Collect(ch chan<- Metric) { - ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine())) - - n := getRuntimeNumThreads() - ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, n) - - var stats debug.GCStats - stats.PauseQuantiles = make([]time.Duration, 5) - debug.ReadGCStats(&stats) - - quantiles := make(map[float64]float64) - for idx, pq := range stats.PauseQuantiles[1:] { - quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds() - } - quantiles[0.0] = stats.PauseQuantiles[0].Seconds() - ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles) - ch <- MustNewConstMetric(c.gcLastTimeDesc, GaugeValue, float64(stats.LastGC.UnixNano())/1e9) - ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1) -} - -func memstatNamespace(s string) string { - return "go_memstats_" + s -} - -// memStatsMetrics provide description, evaluator, runtime/metrics name, and -// value type for memstat metrics. -type memStatsMetrics []struct { - desc *Desc - eval func(*runtime.MemStats) float64 - valType ValueType -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go deleted file mode 100644 index 897a6e906..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !go1.17 -// +build !go1.17 - -package prometheus - -import ( - "runtime" - "sync" - "time" -) - -type goCollector struct { - base baseGoCollector - - // ms... are memstats related. - msLast *runtime.MemStats // Previously collected memstats. - msLastTimestamp time.Time - msMtx sync.Mutex // Protects msLast and msLastTimestamp. - msMetrics memStatsMetrics - msRead func(*runtime.MemStats) // For mocking in tests. - msMaxWait time.Duration // Wait time for fresh memstats. - msMaxAge time.Duration // Maximum allowed age of old memstats. -} - -// NewGoCollector is the obsolete version of collectors.NewGoCollector. -// See there for documentation. -// -// Deprecated: Use collectors.NewGoCollector instead. 
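For clarity, a standalone sketch of the quantile handling in the Collect method above: when PauseQuantiles has five elements, debug.ReadGCStats fills it with the minimum, the 25th, 50th and 75th percentiles, and the maximum, which the collector maps to the 0, 0.25, 0.5, 0.75 and 1.0 keys of the go_gc_duration_seconds summary. The program below only illustrates that mapping.

package main

import (
	"fmt"
	"runtime/debug"
	"time"
)

func gcPauseQuantiles() map[float64]float64 {
	var stats debug.GCStats
	stats.PauseQuantiles = make([]time.Duration, 5)
	debug.ReadGCStats(&stats)

	quantiles := map[float64]float64{0.0: stats.PauseQuantiles[0].Seconds()}
	for idx, pq := range stats.PauseQuantiles[1:] {
		// idx 0..3 maps to quantile keys 0.25, 0.5, 0.75, 1.0.
		quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds()
	}
	return quantiles
}

func main() {
	fmt.Println(gcPauseQuantiles())
}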
-func NewGoCollector() Collector { - msMetrics := goRuntimeMemStats() - msMetrics = append(msMetrics, struct { - desc *Desc - eval func(*runtime.MemStats) float64 - valType ValueType - }{ - // This metric is omitted in Go1.17+, see https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034 - desc: NewDesc( - memstatNamespace("gc_cpu_fraction"), - "The fraction of this program's available CPU time used by the GC since the program started.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction }, - valType: GaugeValue, - }) - return &goCollector{ - base: newBaseGoCollector(), - msLast: &runtime.MemStats{}, - msRead: runtime.ReadMemStats, - msMaxWait: time.Second, - msMaxAge: 5 * time.Minute, - msMetrics: msMetrics, - } -} - -// Describe returns all descriptions of the collector. -func (c *goCollector) Describe(ch chan<- *Desc) { - c.base.Describe(ch) - for _, i := range c.msMetrics { - ch <- i.desc - } -} - -// Collect returns the current state of all metrics of the collector. -func (c *goCollector) Collect(ch chan<- Metric) { - var ( - ms = &runtime.MemStats{} - done = make(chan struct{}) - ) - // Start reading memstats first as it might take a while. - go func() { - c.msRead(ms) - c.msMtx.Lock() - c.msLast = ms - c.msLastTimestamp = time.Now() - c.msMtx.Unlock() - close(done) - }() - - // Collect base non-memory metrics. - c.base.Collect(ch) - - timer := time.NewTimer(c.msMaxWait) - select { - case <-done: // Our own ReadMemStats succeeded in time. Use it. - timer.Stop() // Important for high collection frequencies to not pile up timers. - c.msCollect(ch, ms) - return - case <-timer.C: // Time out, use last memstats if possible. Continue below. - } - c.msMtx.Lock() - if time.Since(c.msLastTimestamp) < c.msMaxAge { - // Last memstats are recent enough. Collect from them under the lock. - c.msCollect(ch, c.msLast) - c.msMtx.Unlock() - return - } - // If we are here, the last memstats are too old or don't exist. We have - // to wait until our own ReadMemStats finally completes. For that to - // happen, we have to release the lock. - c.msMtx.Unlock() - <-done - c.msCollect(ch, ms) -} - -func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) { - for _, i := range c.msMetrics { - ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms)) - } -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go deleted file mode 100644 index 3a2d55e84..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go +++ /dev/null @@ -1,568 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build go1.17 -// +build go1.17 - -package prometheus - -import ( - "math" - "runtime" - "runtime/metrics" - "strings" - "sync" - - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. 
- "github.com/golang/protobuf/proto" - dto "github.com/prometheus/client_model/go" - - "github.com/prometheus/client_golang/prometheus/internal" -) - -const ( - // constants for strings referenced more than once. - goGCHeapTinyAllocsObjects = "/gc/heap/tiny/allocs:objects" - goGCHeapAllocsObjects = "/gc/heap/allocs:objects" - goGCHeapFreesObjects = "/gc/heap/frees:objects" - goGCHeapFreesBytes = "/gc/heap/frees:bytes" - goGCHeapAllocsBytes = "/gc/heap/allocs:bytes" - goGCHeapObjects = "/gc/heap/objects:objects" - goGCHeapGoalBytes = "/gc/heap/goal:bytes" - goMemoryClassesTotalBytes = "/memory/classes/total:bytes" - goMemoryClassesHeapObjectsBytes = "/memory/classes/heap/objects:bytes" - goMemoryClassesHeapUnusedBytes = "/memory/classes/heap/unused:bytes" - goMemoryClassesHeapReleasedBytes = "/memory/classes/heap/released:bytes" - goMemoryClassesHeapFreeBytes = "/memory/classes/heap/free:bytes" - goMemoryClassesHeapStacksBytes = "/memory/classes/heap/stacks:bytes" - goMemoryClassesOSStacksBytes = "/memory/classes/os-stacks:bytes" - goMemoryClassesMetadataMSpanInuseBytes = "/memory/classes/metadata/mspan/inuse:bytes" - goMemoryClassesMetadataMSPanFreeBytes = "/memory/classes/metadata/mspan/free:bytes" - goMemoryClassesMetadataMCacheInuseBytes = "/memory/classes/metadata/mcache/inuse:bytes" - goMemoryClassesMetadataMCacheFreeBytes = "/memory/classes/metadata/mcache/free:bytes" - goMemoryClassesProfilingBucketsBytes = "/memory/classes/profiling/buckets:bytes" - goMemoryClassesMetadataOtherBytes = "/memory/classes/metadata/other:bytes" - goMemoryClassesOtherBytes = "/memory/classes/other:bytes" -) - -// rmNamesForMemStatsMetrics represents runtime/metrics names required to populate goRuntimeMemStats from like logic. -var rmNamesForMemStatsMetrics = []string{ - goGCHeapTinyAllocsObjects, - goGCHeapAllocsObjects, - goGCHeapFreesObjects, - goGCHeapAllocsBytes, - goGCHeapObjects, - goGCHeapGoalBytes, - goMemoryClassesTotalBytes, - goMemoryClassesHeapObjectsBytes, - goMemoryClassesHeapUnusedBytes, - goMemoryClassesHeapReleasedBytes, - goMemoryClassesHeapFreeBytes, - goMemoryClassesHeapStacksBytes, - goMemoryClassesOSStacksBytes, - goMemoryClassesMetadataMSpanInuseBytes, - goMemoryClassesMetadataMSPanFreeBytes, - goMemoryClassesMetadataMCacheInuseBytes, - goMemoryClassesMetadataMCacheFreeBytes, - goMemoryClassesProfilingBucketsBytes, - goMemoryClassesMetadataOtherBytes, - goMemoryClassesOtherBytes, -} - -func bestEffortLookupRM(lookup []string) []metrics.Description { - ret := make([]metrics.Description, 0, len(lookup)) - for _, rm := range metrics.All() { - for _, m := range lookup { - if m == rm.Name { - ret = append(ret, rm) - } - } - } - return ret -} - -type goCollector struct { - base baseGoCollector - - // mu protects updates to all fields ensuring a consistent - // snapshot is always produced by Collect. - mu sync.Mutex - - // Contains all samples that has to retrieved from runtime/metrics (not all of them will be exposed). - sampleBuf []metrics.Sample - // sampleMap allows lookup for MemStats metrics and runtime/metrics histograms for exact sums. - sampleMap map[string]*metrics.Sample - - // rmExposedMetrics represents all runtime/metrics package metrics - // that were configured to be exposed. - rmExposedMetrics []collectorMetric - rmExactSumMapForHist map[string]string - - // With Go 1.17, the runtime/metrics package was introduced. - // From that point on, metric names produced by the runtime/metrics - // package could be generated from runtime/metrics names. 
However, - // these differ from the old names for the same values. - // - // This field exists to export the same values under the old names - // as well. - msMetrics memStatsMetrics - msMetricsEnabled bool -} - -type rmMetricDesc struct { - metrics.Description -} - -func matchRuntimeMetricsRules(rules []internal.GoCollectorRule) []rmMetricDesc { - var descs []rmMetricDesc - for _, d := range metrics.All() { - var ( - deny = true - desc rmMetricDesc - ) - - for _, r := range rules { - if !r.Matcher.MatchString(d.Name) { - continue - } - deny = r.Deny - } - if deny { - continue - } - - desc.Description = d - descs = append(descs, desc) - } - return descs -} - -func defaultGoCollectorOptions() internal.GoCollectorOptions { - return internal.GoCollectorOptions{ - RuntimeMetricSumForHist: map[string]string{ - "/gc/heap/allocs-by-size:bytes": goGCHeapAllocsBytes, - "/gc/heap/frees-by-size:bytes": goGCHeapFreesBytes, - }, - RuntimeMetricRules: []internal.GoCollectorRule{ - //{Matcher: regexp.MustCompile("")}, - }, - } -} - -// NewGoCollector is the obsolete version of collectors.NewGoCollector. -// See there for documentation. -// -// Deprecated: Use collectors.NewGoCollector instead. -func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { - opt := defaultGoCollectorOptions() - for _, o := range opts { - o(&opt) - } - - exposedDescriptions := matchRuntimeMetricsRules(opt.RuntimeMetricRules) - - // Collect all histogram samples so that we can get their buckets. - // The API guarantees that the buckets are always fixed for the lifetime - // of the process. - var histograms []metrics.Sample - for _, d := range exposedDescriptions { - if d.Kind == metrics.KindFloat64Histogram { - histograms = append(histograms, metrics.Sample{Name: d.Name}) - } - } - - if len(histograms) > 0 { - metrics.Read(histograms) - } - - bucketsMap := make(map[string][]float64) - for i := range histograms { - bucketsMap[histograms[i].Name] = histograms[i].Value.Float64Histogram().Buckets - } - - // Generate a collector for each exposed runtime/metrics metric. - metricSet := make([]collectorMetric, 0, len(exposedDescriptions)) - // SampleBuf is used for reading from runtime/metrics. - // We are assuming the largest case to have stable pointers for sampleMap purposes. - sampleBuf := make([]metrics.Sample, 0, len(exposedDescriptions)+len(opt.RuntimeMetricSumForHist)+len(rmNamesForMemStatsMetrics)) - sampleMap := make(map[string]*metrics.Sample, len(exposedDescriptions)) - for _, d := range exposedDescriptions { - namespace, subsystem, name, ok := internal.RuntimeMetricsToProm(&d.Description) - if !ok { - // Just ignore this metric; we can't do anything with it here. - // If a user decides to use the latest version of Go, we don't want - // to fail here. This condition is tested in TestExpectedRuntimeMetrics. 
- continue - } - - sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name}) - sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1] - - var m collectorMetric - if d.Kind == metrics.KindFloat64Histogram { - _, hasSum := opt.RuntimeMetricSumForHist[d.Name] - unit := d.Name[strings.IndexRune(d.Name, ':')+1:] - m = newBatchHistogram( - NewDesc( - BuildFQName(namespace, subsystem, name), - d.Description.Description, - nil, - nil, - ), - internal.RuntimeMetricsBucketsForUnit(bucketsMap[d.Name], unit), - hasSum, - ) - } else if d.Cumulative { - m = NewCounter(CounterOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: name, - Help: d.Description.Description, - }, - ) - } else { - m = NewGauge(GaugeOpts{ - Namespace: namespace, - Subsystem: subsystem, - Name: name, - Help: d.Description.Description, - }) - } - metricSet = append(metricSet, m) - } - - // Add exact sum metrics to sampleBuf if not added before. - for _, h := range histograms { - sumMetric, ok := opt.RuntimeMetricSumForHist[h.Name] - if !ok { - continue - } - - if _, ok := sampleMap[sumMetric]; ok { - continue - } - sampleBuf = append(sampleBuf, metrics.Sample{Name: sumMetric}) - sampleMap[sumMetric] = &sampleBuf[len(sampleBuf)-1] - } - - var ( - msMetrics memStatsMetrics - msDescriptions []metrics.Description - ) - - if !opt.DisableMemStatsLikeMetrics { - msMetrics = goRuntimeMemStats() - msDescriptions = bestEffortLookupRM(rmNamesForMemStatsMetrics) - - // Check if metric was not exposed before and if not, add to sampleBuf. - for _, mdDesc := range msDescriptions { - if _, ok := sampleMap[mdDesc.Name]; ok { - continue - } - sampleBuf = append(sampleBuf, metrics.Sample{Name: mdDesc.Name}) - sampleMap[mdDesc.Name] = &sampleBuf[len(sampleBuf)-1] - } - } - - return &goCollector{ - base: newBaseGoCollector(), - sampleBuf: sampleBuf, - sampleMap: sampleMap, - rmExposedMetrics: metricSet, - rmExactSumMapForHist: opt.RuntimeMetricSumForHist, - msMetrics: msMetrics, - msMetricsEnabled: !opt.DisableMemStatsLikeMetrics, - } -} - -// Describe returns all descriptions of the collector. -func (c *goCollector) Describe(ch chan<- *Desc) { - c.base.Describe(ch) - for _, i := range c.msMetrics { - ch <- i.desc - } - for _, m := range c.rmExposedMetrics { - ch <- m.Desc() - } -} - -// Collect returns the current state of all metrics of the collector. -func (c *goCollector) Collect(ch chan<- Metric) { - // Collect base non-memory metrics. - c.base.Collect(ch) - - if len(c.sampleBuf) == 0 { - return - } - - // Collect must be thread-safe, so prevent concurrent use of - // sampleBuf elements. Just read into sampleBuf but write all the data - // we get into our Metrics or MemStats. - // - // This lock also ensures that the Metrics we send out are all from - // the same updates, ensuring their mutual consistency insofar as - // is guaranteed by the runtime/metrics package. - // - // N.B. This locking is heavy-handed, but Collect is expected to be called - // relatively infrequently. Also the core operation here, metrics.Read, - // is fast (O(tens of microseconds)) so contention should certainly be - // low, though channel operations and any allocations may add to that. - c.mu.Lock() - defer c.mu.Unlock() - - // Populate runtime/metrics sample buffer. - metrics.Read(c.sampleBuf) - - // Collect all our runtime/metrics user chose to expose from sampleBuf (if any). - for i, metric := range c.rmExposedMetrics { - // We created samples for exposed metrics first in order, so indexes match. - sample := c.sampleBuf[i] - - // N.B. 
switch on concrete type because it's significantly more efficient - // than checking for the Counter and Gauge interface implementations. In - // this case, we control all the types here. - switch m := metric.(type) { - case *counter: - // Guard against decreases. This should never happen, but a failure - // to do so will result in a panic, which is a harsh consequence for - // a metrics collection bug. - v0, v1 := m.get(), unwrapScalarRMValue(sample.Value) - if v1 > v0 { - m.Add(unwrapScalarRMValue(sample.Value) - m.get()) - } - m.Collect(ch) - case *gauge: - m.Set(unwrapScalarRMValue(sample.Value)) - m.Collect(ch) - case *batchHistogram: - m.update(sample.Value.Float64Histogram(), c.exactSumFor(sample.Name)) - m.Collect(ch) - default: - panic("unexpected metric type") - } - } - - if c.msMetricsEnabled { - // ms is a dummy MemStats that we populate ourselves so that we can - // populate the old metrics from it if goMemStatsCollection is enabled. - var ms runtime.MemStats - memStatsFromRM(&ms, c.sampleMap) - for _, i := range c.msMetrics { - ch <- MustNewConstMetric(i.desc, i.valType, i.eval(&ms)) - } - } -} - -// unwrapScalarRMValue unwraps a runtime/metrics value that is assumed -// to be scalar and returns the equivalent float64 value. Panics if the -// value is not scalar. -func unwrapScalarRMValue(v metrics.Value) float64 { - switch v.Kind() { - case metrics.KindUint64: - return float64(v.Uint64()) - case metrics.KindFloat64: - return v.Float64() - case metrics.KindBad: - // Unsupported metric. - // - // This should never happen because we always populate our metric - // set from the runtime/metrics package. - panic("unexpected unsupported metric") - default: - // Unsupported metric kind. - // - // This should never happen because we check for this during initialization - // and flag and filter metrics whose kinds we don't understand. - panic("unexpected unsupported metric kind") - } -} - -// exactSumFor takes a runtime/metrics metric name (that is assumed to -// be of kind KindFloat64Histogram) and returns its exact sum and whether -// its exact sum exists. -// -// The runtime/metrics API for histograms doesn't currently expose exact -// sums, but some of the other metrics are in fact exact sums of histograms. -func (c *goCollector) exactSumFor(rmName string) float64 { - sumName, ok := c.rmExactSumMapForHist[rmName] - if !ok { - return 0 - } - s, ok := c.sampleMap[sumName] - if !ok { - return 0 - } - return unwrapScalarRMValue(s.Value) -} - -func memStatsFromRM(ms *runtime.MemStats, rm map[string]*metrics.Sample) { - lookupOrZero := func(name string) uint64 { - if s, ok := rm[name]; ok { - return s.Value.Uint64() - } - return 0 - } - - // Currently, MemStats adds tiny alloc count to both Mallocs AND Frees. - // The reason for this is because MemStats couldn't be extended at the time - // but there was a desire to have Mallocs at least be a little more representative, - // while having Mallocs - Frees still represent a live object count. - // Unfortunately, MemStats doesn't actually export a large allocation count, - // so it's impossible to pull this number out directly. - tinyAllocs := lookupOrZero(goGCHeapTinyAllocsObjects) - ms.Mallocs = lookupOrZero(goGCHeapAllocsObjects) + tinyAllocs - ms.Frees = lookupOrZero(goGCHeapFreesObjects) + tinyAllocs - - ms.TotalAlloc = lookupOrZero(goGCHeapAllocsBytes) - ms.Sys = lookupOrZero(goMemoryClassesTotalBytes) - ms.Lookups = 0 // Already always zero. 
- ms.HeapAlloc = lookupOrZero(goMemoryClassesHeapObjectsBytes) - ms.Alloc = ms.HeapAlloc - ms.HeapInuse = ms.HeapAlloc + lookupOrZero(goMemoryClassesHeapUnusedBytes) - ms.HeapReleased = lookupOrZero(goMemoryClassesHeapReleasedBytes) - ms.HeapIdle = ms.HeapReleased + lookupOrZero(goMemoryClassesHeapFreeBytes) - ms.HeapSys = ms.HeapInuse + ms.HeapIdle - ms.HeapObjects = lookupOrZero(goGCHeapObjects) - ms.StackInuse = lookupOrZero(goMemoryClassesHeapStacksBytes) - ms.StackSys = ms.StackInuse + lookupOrZero(goMemoryClassesOSStacksBytes) - ms.MSpanInuse = lookupOrZero(goMemoryClassesMetadataMSpanInuseBytes) - ms.MSpanSys = ms.MSpanInuse + lookupOrZero(goMemoryClassesMetadataMSPanFreeBytes) - ms.MCacheInuse = lookupOrZero(goMemoryClassesMetadataMCacheInuseBytes) - ms.MCacheSys = ms.MCacheInuse + lookupOrZero(goMemoryClassesMetadataMCacheFreeBytes) - ms.BuckHashSys = lookupOrZero(goMemoryClassesProfilingBucketsBytes) - ms.GCSys = lookupOrZero(goMemoryClassesMetadataOtherBytes) - ms.OtherSys = lookupOrZero(goMemoryClassesOtherBytes) - ms.NextGC = lookupOrZero(goGCHeapGoalBytes) - - // N.B. GCCPUFraction is intentionally omitted. This metric is not useful, - // and often misleading due to the fact that it's an average over the lifetime - // of the process. - // See https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034 - // for more details. - ms.GCCPUFraction = 0 -} - -// batchHistogram is a mutable histogram that is updated -// in batches. -type batchHistogram struct { - selfCollector - - // Static fields updated only once. - desc *Desc - hasSum bool - - // Because this histogram operates in batches, it just uses a - // single mutex for everything. updates are always serialized - // but Write calls may operate concurrently with updates. - // Contention between these two sources should be rare. - mu sync.Mutex - buckets []float64 // Inclusive lower bounds, like runtime/metrics. - counts []uint64 - sum float64 // Used if hasSum is true. -} - -// newBatchHistogram creates a new batch histogram value with the given -// Desc, buckets, and whether or not it has an exact sum available. -// -// buckets must always be from the runtime/metrics package, following -// the same conventions. -func newBatchHistogram(desc *Desc, buckets []float64, hasSum bool) *batchHistogram { - // We need to remove -Inf values. runtime/metrics keeps them around. - // But -Inf bucket should not be allowed for prometheus histograms. - if buckets[0] == math.Inf(-1) { - buckets = buckets[1:] - } - h := &batchHistogram{ - desc: desc, - buckets: buckets, - // Because buckets follows runtime/metrics conventions, there's - // 1 more value in the buckets list than there are buckets represented, - // because in runtime/metrics, the bucket values represent *boundaries*, - // and non-Inf boundaries are inclusive lower bounds for that bucket. - counts: make([]uint64, len(buckets)-1), - hasSum: hasSum, - } - h.init(h) - return h -} - -// update updates the batchHistogram from a runtime/metrics histogram. -// -// sum must be provided if the batchHistogram was created to have an exact sum. -// h.buckets must be a strict subset of his.Buckets. -func (h *batchHistogram) update(his *metrics.Float64Histogram, sum float64) { - counts, buckets := his.Counts, his.Buckets - - h.mu.Lock() - defer h.mu.Unlock() - - // Clear buckets. - for i := range h.counts { - h.counts[i] = 0 - } - // Copy and reduce buckets. 
- var j int - for i, count := range counts { - h.counts[j] += count - if buckets[i+1] == h.buckets[j+1] { - j++ - } - } - if h.hasSum { - h.sum = sum - } -} - -func (h *batchHistogram) Desc() *Desc { - return h.desc -} - -func (h *batchHistogram) Write(out *dto.Metric) error { - h.mu.Lock() - defer h.mu.Unlock() - - sum := float64(0) - if h.hasSum { - sum = h.sum - } - dtoBuckets := make([]*dto.Bucket, 0, len(h.counts)) - totalCount := uint64(0) - for i, count := range h.counts { - totalCount += count - if !h.hasSum { - if count != 0 { - // N.B. This computed sum is an underestimate. - sum += h.buckets[i] * float64(count) - } - } - - // Skip the +Inf bucket, but only for the bucket list. - // It must still count for sum and totalCount. - if math.IsInf(h.buckets[i+1], 1) { - break - } - // Float64Histogram's upper bound is exclusive, so make it inclusive - // by obtaining the next float64 value down, in order. - upperBound := math.Nextafter(h.buckets[i+1], h.buckets[i]) - dtoBuckets = append(dtoBuckets, &dto.Bucket{ - CumulativeCount: proto.Uint64(totalCount), - UpperBound: proto.Float64(upperBound), - }) - } - out.Histogram = &dto.Histogram{ - Bucket: dtoBuckets, - SampleCount: proto.Uint64(totalCount), - SampleSum: proto.Float64(sum), - } - return nil -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go deleted file mode 100644 index 4c873a01c..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ /dev/null @@ -1,1484 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "fmt" - "math" - "runtime" - "sort" - "sync" - "sync/atomic" - "time" - - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - - dto "github.com/prometheus/client_model/go" -) - -// nativeHistogramBounds for the frac of observed values. Only relevant for -// schema > 0. The position in the slice is the schema. (0 is never used, just -// here for convenience of using the schema directly as the index.) -// -// TODO(beorn7): Currently, we do a binary search into these slices. There are -// ways to turn it into a small number of simple array lookups. It probably only -// matters for schema 5 and beyond, but should be investigated. 
See this comment -// as a starting point: -// https://github.com/open-telemetry/opentelemetry-specification/issues/1776#issuecomment-870164310 -var nativeHistogramBounds = [][]float64{ - // Schema "0": - {0.5}, - // Schema 1: - {0.5, 0.7071067811865475}, - // Schema 2: - {0.5, 0.5946035575013605, 0.7071067811865475, 0.8408964152537144}, - // Schema 3: - { - 0.5, 0.5452538663326288, 0.5946035575013605, 0.6484197773255048, - 0.7071067811865475, 0.7711054127039704, 0.8408964152537144, 0.9170040432046711, - }, - // Schema 4: - { - 0.5, 0.5221368912137069, 0.5452538663326288, 0.5693943173783458, - 0.5946035575013605, 0.620928906036742, 0.6484197773255048, 0.6771277734684463, - 0.7071067811865475, 0.7384130729697496, 0.7711054127039704, 0.805245165974627, - 0.8408964152537144, 0.8781260801866495, 0.9170040432046711, 0.9576032806985735, - }, - // Schema 5: - { - 0.5, 0.5109485743270583, 0.5221368912137069, 0.5335702003384117, - 0.5452538663326288, 0.5571933712979462, 0.5693943173783458, 0.5818624293887887, - 0.5946035575013605, 0.6076236799902344, 0.620928906036742, 0.6345254785958666, - 0.6484197773255048, 0.6626183215798706, 0.6771277734684463, 0.6919549409819159, - 0.7071067811865475, 0.7225904034885232, 0.7384130729697496, 0.7545822137967112, - 0.7711054127039704, 0.7879904225539431, 0.805245165974627, 0.8228777390769823, - 0.8408964152537144, 0.8593096490612387, 0.8781260801866495, 0.8973545375015533, - 0.9170040432046711, 0.9370838170551498, 0.9576032806985735, 0.9785720620876999, - }, - // Schema 6: - { - 0.5, 0.5054446430258502, 0.5109485743270583, 0.5165124395106142, - 0.5221368912137069, 0.5278225891802786, 0.5335702003384117, 0.5393803988785598, - 0.5452538663326288, 0.5511912916539204, 0.5571933712979462, 0.5632608093041209, - 0.5693943173783458, 0.5755946149764913, 0.5818624293887887, 0.5881984958251406, - 0.5946035575013605, 0.6010783657263515, 0.6076236799902344, 0.6142402680534349, - 0.620928906036742, 0.6276903785123455, 0.6345254785958666, 0.6414350080393891, - 0.6484197773255048, 0.6554806057623822, 0.6626183215798706, 0.6698337620266515, - 0.6771277734684463, 0.6845012114872953, 0.6919549409819159, 0.6994898362691555, - 0.7071067811865475, 0.7148066691959849, 0.7225904034885232, 0.7304588970903234, - 0.7384130729697496, 0.7464538641456323, 0.7545822137967112, 0.762799075372269, - 0.7711054127039704, 0.7795022001189185, 0.7879904225539431, 0.7965710756711334, - 0.805245165974627, 0.8140137109286738, 0.8228777390769823, 0.8318382901633681, - 0.8408964152537144, 0.8500531768592616, 0.8593096490612387, 0.8686669176368529, - 0.8781260801866495, 0.8876882462632604, 0.8973545375015533, 0.9071260877501991, - 0.9170040432046711, 0.9269895625416926, 0.9370838170551498, 0.9472879907934827, - 0.9576032806985735, 0.9680308967461471, 0.9785720620876999, 0.9892280131939752, - }, - // Schema 7: - { - 0.5, 0.5027149505564014, 0.5054446430258502, 0.5081891574554764, - 0.5109485743270583, 0.5137229745593818, 0.5165124395106142, 0.5193170509806894, - 0.5221368912137069, 0.5249720429003435, 0.5278225891802786, 0.5306886136446309, - 0.5335702003384117, 0.5364674337629877, 0.5393803988785598, 0.5423091811066545, - 0.5452538663326288, 0.5482145409081883, 0.5511912916539204, 0.5541842058618393, - 0.5571933712979462, 0.5602188762048033, 0.5632608093041209, 0.5663192597993595, - 0.5693943173783458, 0.572486072215902, 0.5755946149764913, 0.5787200368168754, - 0.5818624293887887, 0.585021884841625, 0.5881984958251406, 0.5913923554921704, - 0.5946035575013605, 0.5978321960199137, 0.6010783657263515, 
0.6043421618132907, - 0.6076236799902344, 0.6109230164863786, 0.6142402680534349, 0.6175755319684665, - 0.620928906036742, 0.6243004885946023, 0.6276903785123455, 0.6310986751971253, - 0.6345254785958666, 0.637970889198196, 0.6414350080393891, 0.6449179367033329, - 0.6484197773255048, 0.6519406325959679, 0.6554806057623822, 0.659039800633032, - 0.6626183215798706, 0.6662162735415805, 0.6698337620266515, 0.6734708931164728, - 0.6771277734684463, 0.6808045103191123, 0.6845012114872953, 0.688217985377265, - 0.6919549409819159, 0.6957121878859629, 0.6994898362691555, 0.7032879969095076, - 0.7071067811865475, 0.7109463010845827, 0.7148066691959849, 0.718687998724491, - 0.7225904034885232, 0.7265139979245261, 0.7304588970903234, 0.7344252166684908, - 0.7384130729697496, 0.7424225829363761, 0.7464538641456323, 0.7505070348132126, - 0.7545822137967112, 0.7586795205991071, 0.762799075372269, 0.7669409989204777, - 0.7711054127039704, 0.7752924388424999, 0.7795022001189185, 0.7837348199827764, - 0.7879904225539431, 0.7922691326262467, 0.7965710756711334, 0.8008963778413465, - 0.805245165974627, 0.8096175675974316, 0.8140137109286738, 0.8184337248834821, - 0.8228777390769823, 0.8273458838280969, 0.8318382901633681, 0.8363550898207981, - 0.8408964152537144, 0.8454623996346523, 0.8500531768592616, 0.8546688815502312, - 0.8593096490612387, 0.8639756154809185, 0.8686669176368529, 0.8733836930995842, - 0.8781260801866495, 0.8828942179666361, 0.8876882462632604, 0.8925083056594671, - 0.8973545375015533, 0.9022270839033115, 0.9071260877501991, 0.9120516927035263, - 0.9170040432046711, 0.9219832844793128, 0.9269895625416926, 0.9320230241988943, - 0.9370838170551498, 0.9421720895161669, 0.9472879907934827, 0.9524316709088368, - 0.9576032806985735, 0.9628029718180622, 0.9680308967461471, 0.9732872087896164, - 0.9785720620876999, 0.9838856116165875, 0.9892280131939752, 0.9945994234836328, - }, - // Schema 8: - { - 0.5, 0.5013556375251013, 0.5027149505564014, 0.5040779490592088, - 0.5054446430258502, 0.5068150424757447, 0.5081891574554764, 0.509566998038869, - 0.5109485743270583, 0.5123338964485679, 0.5137229745593818, 0.5151158188430205, - 0.5165124395106142, 0.5179128468009786, 0.5193170509806894, 0.520725062344158, - 0.5221368912137069, 0.5235525479396449, 0.5249720429003435, 0.526395386502313, - 0.5278225891802786, 0.5292536613972564, 0.5306886136446309, 0.5321274564422321, - 0.5335702003384117, 0.5350168559101208, 0.5364674337629877, 0.5379219445313954, - 0.5393803988785598, 0.5408428074966075, 0.5423091811066545, 0.5437795304588847, - 0.5452538663326288, 0.5467321995364429, 0.5482145409081883, 0.549700901315111, - 0.5511912916539204, 0.5526857228508706, 0.5541842058618393, 0.5556867516724088, - 0.5571933712979462, 0.5587040757836845, 0.5602188762048033, 0.5617377836665098, - 0.5632608093041209, 0.564787964283144, 0.5663192597993595, 0.5678547070789026, - 0.5693943173783458, 0.5709381019847808, 0.572486072215902, 0.5740382394200894, - 0.5755946149764913, 0.5771552102951081, 0.5787200368168754, 0.5802891060137493, - 0.5818624293887887, 0.5834400184762408, 0.585021884841625, 0.5866080400818185, - 0.5881984958251406, 0.5897932637314379, 0.5913923554921704, 0.5929957828304968, - 0.5946035575013605, 0.5962156912915756, 0.5978321960199137, 0.5994530835371903, - 0.6010783657263515, 0.6027080545025619, 0.6043421618132907, 0.6059806996384005, - 0.6076236799902344, 0.6092711149137041, 0.6109230164863786, 0.6125793968185725, - 0.6142402680534349, 0.6159056423670379, 0.6175755319684665, 0.6192499490999082, - 
0.620928906036742, 0.622612415087629, 0.6243004885946023, 0.6259931389331581, - 0.6276903785123455, 0.6293922197748583, 0.6310986751971253, 0.6328097572894031, - 0.6345254785958666, 0.6362458516947014, 0.637970889198196, 0.6397006037528346, - 0.6414350080393891, 0.6431741147730128, 0.6449179367033329, 0.6466664866145447, - 0.6484197773255048, 0.6501778216898253, 0.6519406325959679, 0.6537082229673385, - 0.6554806057623822, 0.6572577939746774, 0.659039800633032, 0.6608266388015788, - 0.6626183215798706, 0.6644148621029772, 0.6662162735415805, 0.6680225691020727, - 0.6698337620266515, 0.6716498655934177, 0.6734708931164728, 0.6752968579460171, - 0.6771277734684463, 0.6789636531064505, 0.6808045103191123, 0.6826503586020058, - 0.6845012114872953, 0.6863570825438342, 0.688217985377265, 0.690083933630119, - 0.6919549409819159, 0.6938310211492645, 0.6957121878859629, 0.6975984549830999, - 0.6994898362691555, 0.7013863456101023, 0.7032879969095076, 0.7051948041086352, - 0.7071067811865475, 0.7090239421602076, 0.7109463010845827, 0.7128738720527471, - 0.7148066691959849, 0.7167447066838943, 0.718687998724491, 0.7206365595643126, - 0.7225904034885232, 0.7245495448210174, 0.7265139979245261, 0.7284837772007218, - 0.7304588970903234, 0.7324393720732029, 0.7344252166684908, 0.7364164454346837, - 0.7384130729697496, 0.7404151139112358, 0.7424225829363761, 0.7444354947621984, - 0.7464538641456323, 0.7484777058836176, 0.7505070348132126, 0.7525418658117031, - 0.7545822137967112, 0.7566280937263048, 0.7586795205991071, 0.7607365094544071, - 0.762799075372269, 0.7648672334736434, 0.7669409989204777, 0.7690203869158282, - 0.7711054127039704, 0.7731960915705107, 0.7752924388424999, 0.7773944698885442, - 0.7795022001189185, 0.7816156449856788, 0.7837348199827764, 0.7858597406461707, - 0.7879904225539431, 0.7901268813264122, 0.7922691326262467, 0.7944171921585818, - 0.7965710756711334, 0.7987307989543135, 0.8008963778413465, 0.8030678282083853, - 0.805245165974627, 0.8074284071024302, 0.8096175675974316, 0.8118126635086642, - 0.8140137109286738, 0.8162207259936375, 0.8184337248834821, 0.820652723822003, - 0.8228777390769823, 0.8251087869603088, 0.8273458838280969, 0.8295890460808079, - 0.8318382901633681, 0.8340936325652911, 0.8363550898207981, 0.8386226785089391, - 0.8408964152537144, 0.8431763167241966, 0.8454623996346523, 0.8477546807446661, - 0.8500531768592616, 0.8523579048290255, 0.8546688815502312, 0.8569861239649629, - 0.8593096490612387, 0.8616394738731368, 0.8639756154809185, 0.8663180910111553, - 0.8686669176368529, 0.871022112577578, 0.8733836930995842, 0.8757516765159389, - 0.8781260801866495, 0.8805069215187917, 0.8828942179666361, 0.8852879870317771, - 0.8876882462632604, 0.890095013257712, 0.8925083056594671, 0.8949281411607002, - 0.8973545375015533, 0.8997875124702672, 0.9022270839033115, 0.9046732696855155, - 0.9071260877501991, 0.909585556079304, 0.9120516927035263, 0.9145245157024483, - 0.9170040432046711, 0.9194902933879467, 0.9219832844793128, 0.9244830347552253, - 0.9269895625416926, 0.92950288621441, 0.9320230241988943, 0.9345499949706191, - 0.9370838170551498, 0.93962450902828, 0.9421720895161669, 0.9447265771954693, - 0.9472879907934827, 0.9498563490882775, 0.9524316709088368, 0.9550139751351947, - 0.9576032806985735, 0.9601996065815236, 0.9628029718180622, 0.9654133954938133, - 0.9680308967461471, 0.9706554947643201, 0.9732872087896164, 0.9759260581154889, - 0.9785720620876999, 0.9812252401044634, 0.9838856116165875, 0.9865531961276168, - 0.9892280131939752, 0.9919100824251095, 
0.9945994234836328, 0.9972960560854698, - }, -} - -// The nativeHistogramBounds above can be generated with the code below. -// -// TODO(beorn7): It's tempting to actually use `go generate` to generate the -// code above. However, this could lead to slightly different numbers on -// different architectures. We still need to come to terms if we are fine with -// that, or if we might prefer to specify precise numbers in the standard. -// -// var nativeHistogramBounds [][]float64 = make([][]float64, 9) -// -// func init() { -// // Populate nativeHistogramBounds. -// numBuckets := 1 -// for i := range nativeHistogramBounds { -// bounds := []float64{0.5} -// factor := math.Exp2(math.Exp2(float64(-i))) -// for j := 0; j < numBuckets-1; j++ { -// var bound float64 -// if (j+1)%2 == 0 { -// // Use previously calculated value for increased precision. -// bound = nativeHistogramBounds[i-1][j/2+1] -// } else { -// bound = bounds[j] * factor -// } -// bounds = append(bounds, bound) -// } -// numBuckets *= 2 -// nativeHistogramBounds[i] = bounds -// } -// } - -// A Histogram counts individual observations from an event or sample stream in -// configurable static buckets (or in dynamic sparse buckets as part of the -// experimental Native Histograms, see below for more details). Similar to a -// Summary, it also provides a sum of observations and an observation count. -// -// On the Prometheus server, quantiles can be calculated from a Histogram using -// the histogram_quantile PromQL function. -// -// Note that Histograms, in contrast to Summaries, can be aggregated in PromQL -// (see the documentation for detailed procedures). However, Histograms require -// the user to pre-define suitable buckets, and they are in general less -// accurate. (Both problems are addressed by the experimental Native -// Histograms. To use them, configure a NativeHistogramBucketFactor in the -// HistogramOpts. They also require a Prometheus server v2.40+ with the -// corresponding feature flag enabled.) -// -// The Observe method of a Histogram has a very low performance overhead in -// comparison with the Observe method of a Summary. -// -// To create Histogram instances, use NewHistogram. -type Histogram interface { - Metric - Collector - - // Observe adds a single observation to the histogram. Observations are - // usually positive or zero. Negative observations are accepted but - // prevent current versions of Prometheus from properly detecting - // counter resets in the sum of observations. (The experimental Native - // Histograms handle negative observations properly.) See - // https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations - // for details. - Observe(float64) -} - -// bucketLabel is used for the label that defines the upper bound of a -// bucket of a histogram ("le" -> "less or equal"). -const bucketLabel = "le" - -// DefBuckets are the default Histogram buckets. The default buckets are -// tailored to broadly measure the response time (in seconds) of a network -// service. Most likely, however, you will be required to define buckets -// customized to your use case. -var DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} - -// DefNativeHistogramZeroThreshold is the default value for -// NativeHistogramZeroThreshold in the HistogramOpts. -// -// The value is 2^-128 (or 0.5*2^-127 in the actual IEEE 754 representation), -// which is a bucket boundary at all possible resolutions. 
-const DefNativeHistogramZeroThreshold = 2.938735877055719e-39 - -// NativeHistogramZeroThresholdZero can be used as NativeHistogramZeroThreshold -// in the HistogramOpts to create a zero bucket of width zero, i.e. a zero -// bucket that only receives observations of precisely zero. -const NativeHistogramZeroThresholdZero = -1 - -var errBucketLabelNotAllowed = fmt.Errorf( - "%q is not allowed as label name in histograms", bucketLabel, -) - -// LinearBuckets creates 'count' regular buckets, each 'width' wide, where the -// lowest bucket has an upper bound of 'start'. The final +Inf bucket is not -// counted and not included in the returned slice. The returned slice is meant -// to be used for the Buckets field of HistogramOpts. -// -// The function panics if 'count' is zero or negative. -func LinearBuckets(start, width float64, count int) []float64 { - if count < 1 { - panic("LinearBuckets needs a positive count") - } - buckets := make([]float64, count) - for i := range buckets { - buckets[i] = start - start += width - } - return buckets -} - -// ExponentialBuckets creates 'count' regular buckets, where the lowest bucket -// has an upper bound of 'start' and each following bucket's upper bound is -// 'factor' times the previous bucket's upper bound. The final +Inf bucket is -// not counted and not included in the returned slice. The returned slice is -// meant to be used for the Buckets field of HistogramOpts. -// -// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative, -// or if 'factor' is less than or equal 1. -func ExponentialBuckets(start, factor float64, count int) []float64 { - if count < 1 { - panic("ExponentialBuckets needs a positive count") - } - if start <= 0 { - panic("ExponentialBuckets needs a positive start value") - } - if factor <= 1 { - panic("ExponentialBuckets needs a factor greater than 1") - } - buckets := make([]float64, count) - for i := range buckets { - buckets[i] = start - start *= factor - } - return buckets -} - -// ExponentialBucketsRange creates 'count' buckets, where the lowest bucket is -// 'min' and the highest bucket is 'max'. The final +Inf bucket is not counted -// and not included in the returned slice. The returned slice is meant to be -// used for the Buckets field of HistogramOpts. -// -// The function panics if 'count' is 0 or negative, if 'min' is 0 or negative. -func ExponentialBucketsRange(min, max float64, count int) []float64 { - if count < 1 { - panic("ExponentialBucketsRange count needs a positive count") - } - if min <= 0 { - panic("ExponentialBucketsRange min needs to be greater than 0") - } - - // Formula for exponential buckets. - // max = min*growthFactor^(bucketCount-1) - - // We know max/min and highest bucket. Solve for growthFactor. - growthFactor := math.Pow(max/min, 1.0/float64(count-1)) - - // Now that we know growthFactor, solve for each bucket. - buckets := make([]float64, count) - for i := 1; i <= count; i++ { - buckets[i-1] = min * math.Pow(growthFactor, float64(i-1)) - } - return buckets -} - -// HistogramOpts bundles the options for creating a Histogram metric. It is -// mandatory to set Name to a non-empty string. All other fields are optional -// and can safely be left at their zero value, although it is strongly -// encouraged to set a Help string. -type HistogramOpts struct { - // Namespace, Subsystem, and Name are components of the fully-qualified - // name of the Histogram (created by joining these components with - // "_"). 
Only Name is mandatory, the others merely help structuring the - // name. Note that the fully-qualified name of the Histogram must be a - // valid Prometheus metric name. - Namespace string - Subsystem string - Name string - - // Help provides information about this Histogram. - // - // Metrics with the same fully-qualified name must have the same Help - // string. - Help string - - // ConstLabels are used to attach fixed labels to this metric. Metrics - // with the same fully-qualified name must have the same label names in - // their ConstLabels. - // - // ConstLabels are only used rarely. In particular, do not use them to - // attach the same labels to all your metrics. Those use cases are - // better covered by target labels set by the scraping Prometheus - // server, or by one specific metric (e.g. a build_info or a - // machine_role metric). See also - // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels - ConstLabels Labels - - // Buckets defines the buckets into which observations are counted. Each - // element in the slice is the upper inclusive bound of a bucket. The - // values must be sorted in strictly increasing order. There is no need - // to add a highest bucket with +Inf bound, it will be added - // implicitly. If Buckets is left as nil or set to a slice of length - // zero, it is replaced by default buckets. The default buckets are - // DefBuckets if no buckets for a native histogram (see below) are used, - // otherwise the default is no buckets. (In other words, if you want to - // use both reguler buckets and buckets for a native histogram, you have - // to define the regular buckets here explicitly.) - Buckets []float64 - - // If NativeHistogramBucketFactor is greater than one, so-called sparse - // buckets are used (in addition to the regular buckets, if defined - // above). A Histogram with sparse buckets will be ingested as a Native - // Histogram by a Prometheus server with that feature enabled (requires - // Prometheus v2.40+). Sparse buckets are exponential buckets covering - // the whole float64 range (with the exception of the “zero” bucket, see - // SparseBucketsZeroThreshold below). From any one bucket to the next, - // the width of the bucket grows by a constant - // factor. NativeHistogramBucketFactor provides an upper bound for this - // factor (exception see below). The smaller - // NativeHistogramBucketFactor, the more buckets will be used and thus - // the more costly the histogram will become. A generally good trade-off - // between cost and accuracy is a value of 1.1 (each bucket is at most - // 10% wider than the previous one), which will result in each power of - // two divided into 8 buckets (e.g. there will be 8 buckets between 1 - // and 2, same as between 2 and 4, and 4 and 8, etc.). - // - // Details about the actually used factor: The factor is calculated as - // 2^(2^n), where n is an integer number between (and including) -8 and - // 4. n is chosen so that the resulting factor is the largest that is - // still smaller or equal to NativeHistogramBucketFactor. Note that the - // smallest possible factor is therefore approx. 1.00271 (i.e. 2^(2^-8) - // ). If NativeHistogramBucketFactor is greater than 1 but smaller than - // 2^(2^-8), then the actually used factor is still 2^(2^-8) even though - // it is larger than the provided NativeHistogramBucketFactor. - // - // NOTE: Native Histograms are still an experimental feature. 
Their - // behavior might still change without a major version - // bump. Subsequently, all NativeHistogram... options here might still - // change their behavior or name (or might completely disappear) without - // a major version bump. - NativeHistogramBucketFactor float64 - // All observations with an absolute value of less or equal - // NativeHistogramZeroThreshold are accumulated into a “zero” - // bucket. For best results, this should be close to a bucket - // boundary. This is usually the case if picking a power of two. If - // NativeHistogramZeroThreshold is left at zero, - // DefSparseBucketsZeroThreshold is used as the threshold. To configure - // a zero bucket with an actual threshold of zero (i.e. only - // observations of precisely zero will go into the zero bucket), set - // NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero - // constant (or any negative float value). - NativeHistogramZeroThreshold float64 - - // The remaining fields define a strategy to limit the number of - // populated sparse buckets. If NativeHistogramMaxBucketNumber is left - // at zero, the number of buckets is not limited. (Note that this might - // lead to unbounded memory consumption if the values observed by the - // Histogram are sufficiently wide-spread. In particular, this could be - // used as a DoS attack vector. Where the observed values depend on - // external inputs, it is highly recommended to set a - // NativeHistogramMaxBucketNumber.) Once the set - // NativeHistogramMaxBucketNumber is exceeded, the following strategy is - // enacted: First, if the last reset (or the creation) of the histogram - // is at least NativeHistogramMinResetDuration ago, then the whole - // histogram is reset to its initial state (including regular - // buckets). If less time has passed, or if - // NativeHistogramMinResetDuration is zero, no reset is - // performed. Instead, the zero threshold is increased sufficiently to - // reduce the number of buckets to or below - // NativeHistogramMaxBucketNumber, but not to more than - // NativeHistogramMaxZeroThreshold. Thus, if - // NativeHistogramMaxZeroThreshold is already at or below the current - // zero threshold, nothing happens at this step. After that, if the - // number of buckets still exceeds NativeHistogramMaxBucketNumber, the - // resolution of the histogram is reduced by doubling the width of the - // sparse buckets (up to a growth factor between one bucket to the next - // of 2^(2^4) = 65536, see above). - NativeHistogramMaxBucketNumber uint32 - NativeHistogramMinResetDuration time.Duration - NativeHistogramMaxZeroThreshold float64 -} - -// NewHistogram creates a new Histogram based on the provided HistogramOpts. It -// panics if the buckets in HistogramOpts are not in strictly increasing order. -// -// The returned implementation also implements ExemplarObserver. It is safe to -// perform the corresponding type assertion. Exemplars are tracked separately -// for each bucket. 
-func NewHistogram(opts HistogramOpts) Histogram { - return newHistogram( - NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), - opts, - ) -} - -func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { - if len(desc.variableLabels) != len(labelValues) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) - } - - for _, n := range desc.variableLabels { - if n == bucketLabel { - panic(errBucketLabelNotAllowed) - } - } - for _, lp := range desc.constLabelPairs { - if lp.GetName() == bucketLabel { - panic(errBucketLabelNotAllowed) - } - } - - h := &histogram{ - desc: desc, - upperBounds: opts.Buckets, - labelPairs: MakeLabelPairs(desc, labelValues), - nativeHistogramMaxBuckets: opts.NativeHistogramMaxBucketNumber, - nativeHistogramMaxZeroThreshold: opts.NativeHistogramMaxZeroThreshold, - nativeHistogramMinResetDuration: opts.NativeHistogramMinResetDuration, - lastResetTime: time.Now(), - now: time.Now, - } - if len(h.upperBounds) == 0 && opts.NativeHistogramBucketFactor <= 1 { - h.upperBounds = DefBuckets - } - if opts.NativeHistogramBucketFactor <= 1 { - h.nativeHistogramSchema = math.MinInt32 // To mark that there are no sparse buckets. - } else { - switch { - case opts.NativeHistogramZeroThreshold > 0: - h.nativeHistogramZeroThreshold = opts.NativeHistogramZeroThreshold - case opts.NativeHistogramZeroThreshold == 0: - h.nativeHistogramZeroThreshold = DefNativeHistogramZeroThreshold - } // Leave h.nativeHistogramZeroThreshold at 0 otherwise. - h.nativeHistogramSchema = pickSchema(opts.NativeHistogramBucketFactor) - } - for i, upperBound := range h.upperBounds { - if i < len(h.upperBounds)-1 { - if upperBound >= h.upperBounds[i+1] { - panic(fmt.Errorf( - "histogram buckets must be in increasing order: %f >= %f", - upperBound, h.upperBounds[i+1], - )) - } - } else { - if math.IsInf(upperBound, +1) { - // The +Inf bucket is implicit. Remove it here. - h.upperBounds = h.upperBounds[:i] - } - } - } - // Finally we know the final length of h.upperBounds and can make buckets - // for both counts as well as exemplars: - h.counts[0] = &histogramCounts{ - buckets: make([]uint64, len(h.upperBounds)), - nativeHistogramZeroThresholdBits: math.Float64bits(h.nativeHistogramZeroThreshold), - nativeHistogramSchema: h.nativeHistogramSchema, - } - h.counts[1] = &histogramCounts{ - buckets: make([]uint64, len(h.upperBounds)), - nativeHistogramZeroThresholdBits: math.Float64bits(h.nativeHistogramZeroThreshold), - nativeHistogramSchema: h.nativeHistogramSchema, - } - h.exemplars = make([]atomic.Value, len(h.upperBounds)+1) - - h.init(h) // Init self-collection. - return h -} - -type histogramCounts struct { - // Order in this struct matters for the alignment required by atomic - // operations, see http://golang.org/pkg/sync/atomic/#pkg-note-BUG - - // sumBits contains the bits of the float64 representing the sum of all - // observations. - sumBits uint64 - count uint64 - - // nativeHistogramZeroBucket counts all (positive and negative) - // observations in the zero bucket (with an absolute value less or equal - // the current threshold, see next field. - nativeHistogramZeroBucket uint64 - // nativeHistogramZeroThresholdBits is the bit pattern of the current - // threshold for the zero bucket. It's initially equal to - // nativeHistogramZeroThreshold but may change according to the bucket - // count limitation strategy. 
- nativeHistogramZeroThresholdBits uint64 - // nativeHistogramSchema may change over time according to the bucket - // count limitation strategy and therefore has to be saved here. - nativeHistogramSchema int32 - // Number of (positive and negative) sparse buckets. - nativeHistogramBucketsNumber uint32 - - // Regular buckets. - buckets []uint64 - - // The sparse buckets for native histograms are implemented with a - // sync.Map for now. A dedicated data structure will likely be more - // efficient. There are separate maps for negative and positive - // observations. The map's value is an *int64, counting observations in - // that bucket. (Note that we don't use uint64 as an int64 won't - // overflow in practice, and working with signed numbers from the - // beginning simplifies the handling of deltas.) The map's key is the - // index of the bucket according to the used - // nativeHistogramSchema. Index 0 is for an upper bound of 1. - nativeHistogramBucketsPositive, nativeHistogramBucketsNegative sync.Map -} - -// observe manages the parts of observe that only affects -// histogramCounts. doSparse is true if sparse buckets should be done, -// too. -func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool) { - if bucket < len(hc.buckets) { - atomic.AddUint64(&hc.buckets[bucket], 1) - } - atomicAddFloat(&hc.sumBits, v) - if doSparse && !math.IsNaN(v) { - var ( - key int - schema = atomic.LoadInt32(&hc.nativeHistogramSchema) - zeroThreshold = math.Float64frombits(atomic.LoadUint64(&hc.nativeHistogramZeroThresholdBits)) - bucketCreated, isInf bool - ) - if math.IsInf(v, 0) { - // Pretend v is MaxFloat64 but later increment key by one. - if math.IsInf(v, +1) { - v = math.MaxFloat64 - } else { - v = -math.MaxFloat64 - } - isInf = true - } - frac, exp := math.Frexp(math.Abs(v)) - if schema > 0 { - bounds := nativeHistogramBounds[schema] - key = sort.SearchFloat64s(bounds, frac) + (exp-1)*len(bounds) - } else { - key = exp - if frac == 0.5 { - key-- - } - div := 1 << -schema - key = (key + div - 1) / div - } - if isInf { - key++ - } - switch { - case v > zeroThreshold: - bucketCreated = addToBucket(&hc.nativeHistogramBucketsPositive, key, 1) - case v < -zeroThreshold: - bucketCreated = addToBucket(&hc.nativeHistogramBucketsNegative, key, 1) - default: - atomic.AddUint64(&hc.nativeHistogramZeroBucket, 1) - } - if bucketCreated { - atomic.AddUint32(&hc.nativeHistogramBucketsNumber, 1) - } - } - // Increment count last as we take it as a signal that the observation - // is complete. - atomic.AddUint64(&hc.count, 1) -} - -type histogram struct { - // countAndHotIdx enables lock-free writes with use of atomic updates. - // The most significant bit is the hot index [0 or 1] of the count field - // below. Observe calls update the hot one. All remaining bits count the - // number of Observe calls. Observe starts by incrementing this counter, - // and finish by incrementing the count field in the respective - // histogramCounts, as a marker for completion. - // - // Calls of the Write method (which are non-mutating reads from the - // perspective of the histogram) swap the hot–cold under the writeMtx - // lock. A cooldown is awaited (while locked) by comparing the number of - // observations with the initiation count. Once they match, then the - // last observation on the now cool one has completed. All cold fields must - // be merged into the new hot before releasing writeMtx. - // - // Fields with atomic access first! 
See alignment constraint: - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG - countAndHotIdx uint64 - - selfCollector - desc *Desc - - // Only used in the Write method and for sparse bucket management. - mtx sync.Mutex - - // Two counts, one is "hot" for lock-free observations, the other is - // "cold" for writing out a dto.Metric. It has to be an array of - // pointers to guarantee 64bit alignment of the histogramCounts, see - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG. - counts [2]*histogramCounts - - upperBounds []float64 - labelPairs []*dto.LabelPair - exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar. - nativeHistogramSchema int32 // The initial schema. Set to math.MinInt32 if no sparse buckets are used. - nativeHistogramZeroThreshold float64 // The initial zero threshold. - nativeHistogramMaxZeroThreshold float64 - nativeHistogramMaxBuckets uint32 - nativeHistogramMinResetDuration time.Duration - lastResetTime time.Time // Protected by mtx. - - now func() time.Time // To mock out time.Now() for testing. -} - -func (h *histogram) Desc() *Desc { - return h.desc -} - -func (h *histogram) Observe(v float64) { - h.observe(v, h.findBucket(v)) -} - -func (h *histogram) ObserveWithExemplar(v float64, e Labels) { - i := h.findBucket(v) - h.observe(v, i) - h.updateExemplar(v, i, e) -} - -func (h *histogram) Write(out *dto.Metric) error { - // For simplicity, we protect this whole method by a mutex. It is not in - // the hot path, i.e. Observe is called much more often than Write. The - // complication of making Write lock-free isn't worth it, if possible at - // all. - h.mtx.Lock() - defer h.mtx.Unlock() - - // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0) - // without touching the count bits. See the struct comments for a full - // description of the algorithm. - n := atomic.AddUint64(&h.countAndHotIdx, 1<<63) - // count is contained unchanged in the lower 63 bits. - count := n & ((1 << 63) - 1) - // The most significant bit tells us which counts is hot. The complement - // is thus the cold one. - hotCounts := h.counts[n>>63] - coldCounts := h.counts[(^n)>>63] - - waitForCooldown(count, coldCounts) - - his := &dto.Histogram{ - Bucket: make([]*dto.Bucket, len(h.upperBounds)), - SampleCount: proto.Uint64(count), - SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), - } - out.Histogram = his - out.Label = h.labelPairs - - var cumCount uint64 - for i, upperBound := range h.upperBounds { - cumCount += atomic.LoadUint64(&coldCounts.buckets[i]) - his.Bucket[i] = &dto.Bucket{ - CumulativeCount: proto.Uint64(cumCount), - UpperBound: proto.Float64(upperBound), - } - if e := h.exemplars[i].Load(); e != nil { - his.Bucket[i].Exemplar = e.(*dto.Exemplar) - } - } - // If there is an exemplar for the +Inf bucket, we have to add that bucket explicitly. 
- if e := h.exemplars[len(h.upperBounds)].Load(); e != nil { - b := &dto.Bucket{ - CumulativeCount: proto.Uint64(count), - UpperBound: proto.Float64(math.Inf(1)), - Exemplar: e.(*dto.Exemplar), - } - his.Bucket = append(his.Bucket, b) - } - if h.nativeHistogramSchema > math.MinInt32 { - his.ZeroThreshold = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.nativeHistogramZeroThresholdBits))) - his.Schema = proto.Int32(atomic.LoadInt32(&coldCounts.nativeHistogramSchema)) - zeroBucket := atomic.LoadUint64(&coldCounts.nativeHistogramZeroBucket) - - defer func() { - coldCounts.nativeHistogramBucketsPositive.Range(addAndReset(&hotCounts.nativeHistogramBucketsPositive, &hotCounts.nativeHistogramBucketsNumber)) - coldCounts.nativeHistogramBucketsNegative.Range(addAndReset(&hotCounts.nativeHistogramBucketsNegative, &hotCounts.nativeHistogramBucketsNumber)) - }() - - his.ZeroCount = proto.Uint64(zeroBucket) - his.NegativeSpan, his.NegativeDelta = makeBuckets(&coldCounts.nativeHistogramBucketsNegative) - his.PositiveSpan, his.PositiveDelta = makeBuckets(&coldCounts.nativeHistogramBucketsPositive) - } - addAndResetCounts(hotCounts, coldCounts) - return nil -} - -// findBucket returns the index of the bucket for the provided value, or -// len(h.upperBounds) for the +Inf bucket. -func (h *histogram) findBucket(v float64) int { - // TODO(beorn7): For small numbers of buckets (<30), a linear search is - // slightly faster than the binary search. If we really care, we could - // switch from one search strategy to the other depending on the number - // of buckets. - // - // Microbenchmarks (BenchmarkHistogramNoLabels): - // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op - // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op - // 300 buckets: 154 ns/op linear - binary 61.6 ns/op - return sort.SearchFloat64s(h.upperBounds, v) -} - -// observe is the implementation for Observe without the findBucket part. -func (h *histogram) observe(v float64, bucket int) { - // Do not add to sparse buckets for NaN observations. - doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v) - // We increment h.countAndHotIdx so that the counter in the lower - // 63 bits gets incremented. At the same time, we get the new value - // back, which we can use to find the currently-hot counts. - n := atomic.AddUint64(&h.countAndHotIdx, 1) - hotCounts := h.counts[n>>63] - hotCounts.observe(v, bucket, doSparse) - if doSparse { - h.limitBuckets(hotCounts, v, bucket) - } -} - -// limitSparsebuckets applies a strategy to limit the number of populated sparse -// buckets. It's generally best effort, and there are situations where the -// number can go higher (if even the lowest resolution isn't enough to reduce -// the number sufficiently, or if the provided counts aren't fully updated yet -// by a concurrently happening Write call). -func (h *histogram) limitBuckets(counts *histogramCounts, value float64, bucket int) { - if h.nativeHistogramMaxBuckets == 0 { - return // No limit configured. - } - if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&counts.nativeHistogramBucketsNumber) { - return // Bucket limit not exceeded yet. - } - - h.mtx.Lock() - defer h.mtx.Unlock() - - // The hot counts might have been swapped just before we acquired the - // lock. Re-fetch the hot counts first... 
- n := atomic.LoadUint64(&h.countAndHotIdx) - hotIdx := n >> 63 - coldIdx := (^n) >> 63 - hotCounts := h.counts[hotIdx] - coldCounts := h.counts[coldIdx] - // ...and then check again if we really have to reduce the bucket count. - if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&hotCounts.nativeHistogramBucketsNumber) { - return // Bucket limit not exceeded after all. - } - // Try the various strategies in order. - if h.maybeReset(hotCounts, coldCounts, coldIdx, value, bucket) { - return - } - if h.maybeWidenZeroBucket(hotCounts, coldCounts) { - return - } - h.doubleBucketWidth(hotCounts, coldCounts) -} - -// maybeReset resests the whole histogram if at least h.nativeHistogramMinResetDuration -// has been passed. It returns true if the histogram has been reset. The caller -// must have locked h.mtx. -func (h *histogram) maybeReset(hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int) bool { - // We are using the possibly mocked h.now() rather than - // time.Since(h.lastResetTime) to enable testing. - if h.nativeHistogramMinResetDuration == 0 || h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration { - return false - } - // Completely reset coldCounts. - h.resetCounts(cold) - // Repeat the latest observation to not lose it completely. - cold.observe(value, bucket, true) - // Make coldCounts the new hot counts while ressetting countAndHotIdx. - n := atomic.SwapUint64(&h.countAndHotIdx, (coldIdx<<63)+1) - count := n & ((1 << 63) - 1) - waitForCooldown(count, hot) - // Finally, reset the formerly hot counts, too. - h.resetCounts(hot) - h.lastResetTime = h.now() - return true -} - -// maybeWidenZeroBucket widens the zero bucket until it includes the existing -// buckets closest to the zero bucket (which could be two, if an equidistant -// negative and a positive bucket exists, but usually it's only one bucket to be -// merged into the new wider zero bucket). h.nativeHistogramMaxZeroThreshold -// limits how far the zero bucket can be extended, and if that's not enough to -// include an existing bucket, the method returns false. The caller must have -// locked h.mtx. -func (h *histogram) maybeWidenZeroBucket(hot, cold *histogramCounts) bool { - currentZeroThreshold := math.Float64frombits(atomic.LoadUint64(&hot.nativeHistogramZeroThresholdBits)) - if currentZeroThreshold >= h.nativeHistogramMaxZeroThreshold { - return false - } - // Find the key of the bucket closest to zero. - smallestKey := findSmallestKey(&hot.nativeHistogramBucketsPositive) - smallestNegativeKey := findSmallestKey(&hot.nativeHistogramBucketsNegative) - if smallestNegativeKey < smallestKey { - smallestKey = smallestNegativeKey - } - if smallestKey == math.MaxInt32 { - return false - } - newZeroThreshold := getLe(smallestKey, atomic.LoadInt32(&hot.nativeHistogramSchema)) - if newZeroThreshold > h.nativeHistogramMaxZeroThreshold { - return false // New threshold would exceed the max threshold. - } - atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold)) - // Remove applicable buckets. - if _, loaded := cold.nativeHistogramBucketsNegative.LoadAndDelete(smallestKey); loaded { - atomicDecUint32(&cold.nativeHistogramBucketsNumber) - } - if _, loaded := cold.nativeHistogramBucketsPositive.LoadAndDelete(smallestKey); loaded { - atomicDecUint32(&cold.nativeHistogramBucketsNumber) - } - // Make cold counts the new hot counts. 
- n := atomic.AddUint64(&h.countAndHotIdx, 1<<63) - count := n & ((1 << 63) - 1) - // Swap the pointer names to represent the new roles and make - // the rest less confusing. - hot, cold = cold, hot - waitForCooldown(count, cold) - // Add all the now cold counts to the new hot counts... - addAndResetCounts(hot, cold) - // ...adjust the new zero threshold in the cold counts, too... - atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold)) - // ...and then merge the newly deleted buckets into the wider zero - // bucket. - mergeAndDeleteOrAddAndReset := func(hotBuckets, coldBuckets *sync.Map) func(k, v interface{}) bool { - return func(k, v interface{}) bool { - key := k.(int) - bucket := v.(*int64) - if key == smallestKey { - // Merge into hot zero bucket... - atomic.AddUint64(&hot.nativeHistogramZeroBucket, uint64(atomic.LoadInt64(bucket))) - // ...and delete from cold counts. - coldBuckets.Delete(key) - atomicDecUint32(&cold.nativeHistogramBucketsNumber) - } else { - // Add to corresponding hot bucket... - if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) { - atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1) - } - // ...and reset cold bucket. - atomic.StoreInt64(bucket, 0) - } - return true - } - } - - cold.nativeHistogramBucketsPositive.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsPositive, &cold.nativeHistogramBucketsPositive)) - cold.nativeHistogramBucketsNegative.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsNegative, &cold.nativeHistogramBucketsNegative)) - return true -} - -// doubleBucketWidth doubles the bucket width (by decrementing the schema -// number). Note that very sparse buckets could lead to a low reduction of the -// bucket count (or even no reduction at all). The method does nothing if the -// schema is already -4. -func (h *histogram) doubleBucketWidth(hot, cold *histogramCounts) { - coldSchema := atomic.LoadInt32(&cold.nativeHistogramSchema) - if coldSchema == -4 { - return // Already at lowest resolution. - } - coldSchema-- - atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema) - // Play it simple and just delete all cold buckets. - atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0) - deleteSyncMap(&cold.nativeHistogramBucketsNegative) - deleteSyncMap(&cold.nativeHistogramBucketsPositive) - // Make coldCounts the new hot counts. - n := atomic.AddUint64(&h.countAndHotIdx, 1<<63) - count := n & ((1 << 63) - 1) - // Swap the pointer names to represent the new roles and make - // the rest less confusing. - hot, cold = cold, hot - waitForCooldown(count, cold) - // Add all the now cold counts to the new hot counts... - addAndResetCounts(hot, cold) - // ...adjust the schema in the cold counts, too... - atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema) - // ...and then merge the cold buckets into the wider hot buckets. - merge := func(hotBuckets *sync.Map) func(k, v interface{}) bool { - return func(k, v interface{}) bool { - key := k.(int) - bucket := v.(*int64) - // Adjust key to match the bucket to merge into. - if key > 0 { - key++ - } - key /= 2 - // Add to corresponding hot bucket. - if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) { - atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1) - } - return true - } - } - - cold.nativeHistogramBucketsPositive.Range(merge(&hot.nativeHistogramBucketsPositive)) - cold.nativeHistogramBucketsNegative.Range(merge(&hot.nativeHistogramBucketsNegative)) - // Play it simple again and just delete all cold buckets. 
- atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0) - deleteSyncMap(&cold.nativeHistogramBucketsNegative) - deleteSyncMap(&cold.nativeHistogramBucketsPositive) -} - -func (h *histogram) resetCounts(counts *histogramCounts) { - atomic.StoreUint64(&counts.sumBits, 0) - atomic.StoreUint64(&counts.count, 0) - atomic.StoreUint64(&counts.nativeHistogramZeroBucket, 0) - atomic.StoreUint64(&counts.nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold)) - atomic.StoreInt32(&counts.nativeHistogramSchema, h.nativeHistogramSchema) - atomic.StoreUint32(&counts.nativeHistogramBucketsNumber, 0) - for i := range h.upperBounds { - atomic.StoreUint64(&counts.buckets[i], 0) - } - deleteSyncMap(&counts.nativeHistogramBucketsNegative) - deleteSyncMap(&counts.nativeHistogramBucketsPositive) -} - -// updateExemplar replaces the exemplar for the provided bucket. With empty -// labels, it's a no-op. It panics if any of the labels is invalid. -func (h *histogram) updateExemplar(v float64, bucket int, l Labels) { - if l == nil { - return - } - e, err := newExemplar(v, h.now(), l) - if err != nil { - panic(err) - } - h.exemplars[bucket].Store(e) -} - -// HistogramVec is a Collector that bundles a set of Histograms that all share the -// same Desc, but have different values for their variable labels. This is used -// if you want to count the same thing partitioned by various dimensions -// (e.g. HTTP request latencies, partitioned by status code and method). Create -// instances with NewHistogramVec. -type HistogramVec struct { - *MetricVec -} - -// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and -// partitioned by the given label names. -func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - labelNames, - opts.ConstLabels, - ) - return &HistogramVec{ - MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { - return newHistogram(desc, opts, lvs...) - }), - } -} - -// GetMetricWithLabelValues returns the Histogram for the given slice of label -// values (same order as the variable labels in Desc). If that combination of -// label values is accessed for the first time, a new Histogram is created. -// -// It is possible to call this method without using the returned Histogram to only -// create the new Histogram but leave it at its starting value, a Histogram without -// any observations. -// -// Keeping the Histogram for later use is possible (and should be considered if -// performance is critical), but keep in mind that Reset, DeleteLabelValues and -// Delete can be used to delete the Histogram from the HistogramVec. In that case, the -// Histogram will still exist, but it will not be exported anymore, even if a -// Histogram with the same label values is created later. See also the CounterVec -// example. -// -// An error is returned if the number of label values is not the same as the -// number of variable labels in Desc (minus any curried labels). -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as -// an alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// See also the GaugeVec example. 
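The trade-off described above, resolving a child once with GetMetricWithLabelValues versus going through WithLabelValues on every observation, is easiest to see in a short sketch against the public client_golang API. The metric name and label values below are invented for illustration; this is a usage sketch, not part of the vendored change.

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reqDur := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "http_request_duration_seconds", // hypothetical metric name
		Help:    "Request duration partitioned by status code and method.",
		Buckets: prometheus.DefBuckets,
	}, []string{"code", "method"})
	prometheus.MustRegister(reqDur)

	// Convenient, but looks the child up on every call (and panics on label errors).
	reqDur.WithLabelValues("200", "GET").Observe(0.042)

	// Hot path: resolve the Observer once, keep it, reuse it.
	obs, err := reqDur.GetMetricWithLabelValues("200", "GET")
	if err != nil {
		panic(err)
	}
	start := time.Now()
	// ... handle request ...
	obs.Observe(time.Since(start).Seconds())
}

Keeping the Observer avoids the per-call lookup, but as the comment above notes, a later Delete or Reset on the vector detaches it from what is exported.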
-func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { - metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...) - if metric != nil { - return metric.(Observer), err - } - return nil, err -} - -// GetMetricWith returns the Histogram for the given Labels map (the label names -// must match those of the variable labels in Desc). If that label map is -// accessed for the first time, a new Histogram is created. Implications of -// creating a Histogram without using it and keeping the Histogram for later use -// are the same as for GetMetricWithLabelValues. -// -// An error is returned if the number and names of the Labels are inconsistent -// with those of the variable labels in Desc (minus any curried labels). -// -// This method is used for the same purpose as -// GetMetricWithLabelValues(...string). See there for pros and cons of the two -// methods. -func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) { - metric, err := v.MetricVec.GetMetricWith(labels) - if metric != nil { - return metric.(Observer), err - } - return nil, err -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. Not returning an -// error allows shortcuts like -// -// myVec.WithLabelValues("404", "GET").Observe(42.21) -func (v *HistogramVec) WithLabelValues(lvs ...string) Observer { - h, err := v.GetMetricWithLabelValues(lvs...) - if err != nil { - panic(err) - } - return h -} - -// With works as GetMetricWith but panics where GetMetricWithLabels would have -// returned an error. Not returning an error allows shortcuts like -// -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) -func (v *HistogramVec) With(labels Labels) Observer { - h, err := v.GetMetricWith(labels) - if err != nil { - panic(err) - } - return h -} - -// CurryWith returns a vector curried with the provided labels, i.e. the -// returned vector has those labels pre-set for all labeled operations performed -// on it. The cardinality of the curried vector is reduced accordingly. The -// order of the remaining labels stays the same (just with the curried labels -// taken out of the sequence – which is relevant for the -// (GetMetric)WithLabelValues methods). It is possible to curry a curried -// vector, but only with labels not yet used for currying before. -// -// The metrics contained in the HistogramVec are shared between the curried and -// uncurried vectors. They are just accessed differently. Curried and uncurried -// vectors behave identically in terms of collection. Only one must be -// registered with a given registry (usually the uncurried version). The Reset -// method deletes all metrics, even if called on a curried vector. -func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) { - vec, err := v.MetricVec.CurryWith(labels) - if vec != nil { - return &HistogramVec{vec}, err - } - return nil, err -} - -// MustCurryWith works as CurryWith but panics where CurryWith would have -// returned an error. 
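Currying, as documented above and implemented just below, pre-binds some labels so that call sites only supply the remaining ones. A minimal sketch with an invented "handler" label:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	reqDur := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name: "http_request_duration_seconds", // hypothetical
		Help: "Request duration by handler, code and method.",
	}, []string{"handler", "code", "method"})

	// Pre-bind "handler"; the curried vector only needs the remaining labels,
	// in their original order.
	perHandler := reqDur.MustCurryWith(prometheus.Labels{"handler": "/api/v1/items"})
	perHandler.WithLabelValues("200", "GET").Observe(0.042)

	// CurryWith is the error-returning variant, useful when the label set
	// comes from configuration rather than literals.
	if other, err := reqDur.CurryWith(prometheus.Labels{"handler": "/admin"}); err == nil {
		other.With(prometheus.Labels{"code": "500", "method": "POST"}).Observe(1.3)
	}
}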
-func (v *HistogramVec) MustCurryWith(labels Labels) ObserverVec { - vec, err := v.CurryWith(labels) - if err != nil { - panic(err) - } - return vec -} - -type constHistogram struct { - desc *Desc - count uint64 - sum float64 - buckets map[float64]uint64 - labelPairs []*dto.LabelPair -} - -func (h *constHistogram) Desc() *Desc { - return h.desc -} - -func (h *constHistogram) Write(out *dto.Metric) error { - his := &dto.Histogram{} - - buckets := make([]*dto.Bucket, 0, len(h.buckets)) - - his.SampleCount = proto.Uint64(h.count) - his.SampleSum = proto.Float64(h.sum) - for upperBound, count := range h.buckets { - buckets = append(buckets, &dto.Bucket{ - CumulativeCount: proto.Uint64(count), - UpperBound: proto.Float64(upperBound), - }) - } - - if len(buckets) > 0 { - sort.Sort(buckSort(buckets)) - } - his.Bucket = buckets - - out.Histogram = his - out.Label = h.labelPairs - - return nil -} - -// NewConstHistogram returns a metric representing a Prometheus histogram with -// fixed values for the count, sum, and bucket counts. As those parameters -// cannot be changed, the returned value does not implement the Histogram -// interface (but only the Metric interface). Users of this package will not -// have much use for it in regular operations. However, when implementing custom -// Collectors, it is useful as a throw-away metric that is generated on the fly -// to send it to Prometheus in the Collect method. -// -// buckets is a map of upper bounds to cumulative counts, excluding the +Inf -// bucket. The +Inf bucket is implicit, and its value is equal to the provided count. -// -// NewConstHistogram returns an error if the length of labelValues is not -// consistent with the variable labels in Desc or if Desc is invalid. -func NewConstHistogram( - desc *Desc, - count uint64, - sum float64, - buckets map[float64]uint64, - labelValues ...string, -) (Metric, error) { - if desc.err != nil { - return nil, desc.err - } - if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { - return nil, err - } - return &constHistogram{ - desc: desc, - count: count, - sum: sum, - buckets: buckets, - labelPairs: MakeLabelPairs(desc, labelValues), - }, nil -} - -// MustNewConstHistogram is a version of NewConstHistogram that panics where -// NewConstHistogram would have returned an error. -func MustNewConstHistogram( - desc *Desc, - count uint64, - sum float64, - buckets map[float64]uint64, - labelValues ...string, -) Metric { - m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...) - if err != nil { - panic(err) - } - return m -} - -type buckSort []*dto.Bucket - -func (s buckSort) Len() int { - return len(s) -} - -func (s buckSort) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s buckSort) Less(i, j int) bool { - return s[i].GetUpperBound() < s[j].GetUpperBound() -} - -// pickSchema returns the largest number n between -4 and 8 such that -// 2^(2^-n) is less or equal the provided bucketFactor. -// -// Special cases: -// - bucketFactor <= 1: panics. -// - bucketFactor < 2^(2^-8) (but > 1): still returns 8. 
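The schema chosen by pickSchema (implementation directly below) fixes how fast native-histogram bucket boundaries grow: each boundary is a factor of 2^(2^-schema) above the previous one. A small standalone sketch of that mapping, mirroring the formula below; the sample factor 1.1 is arbitrary.

package main

import (
	"fmt"
	"math"
)

// growthFactor returns the per-bucket growth factor implied by a native
// histogram schema: 2^(2^-schema).
func growthFactor(schema int32) float64 {
	return math.Pow(2, math.Pow(2, -float64(schema)))
}

func main() {
	// Same mapping as pickSchema: schema = -floor(log2(log2(bucketFactor))),
	// clamped to the range [-4, 8].
	bucketFactor := 1.1
	schema := -int32(math.Floor(math.Log2(math.Log2(bucketFactor))))
	if schema > 8 {
		schema = 8
	}
	if schema < -4 {
		schema = -4
	}
	// Prints: bucketFactor=1.10 -> schema=3, growth per bucket ~1.0905
	fmt.Printf("bucketFactor=%.2f -> schema=%d, growth per bucket ~%.4f\n",
		bucketFactor, schema, growthFactor(schema))
}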
-func pickSchema(bucketFactor float64) int32 { - if bucketFactor <= 1 { - panic(fmt.Errorf("bucketFactor %f is <=1", bucketFactor)) - } - floor := math.Floor(math.Log2(math.Log2(bucketFactor))) - switch { - case floor <= -8: - return 8 - case floor >= 4: - return -4 - default: - return -int32(floor) - } -} - -func makeBuckets(buckets *sync.Map) ([]*dto.BucketSpan, []int64) { - var ii []int - buckets.Range(func(k, v interface{}) bool { - ii = append(ii, k.(int)) - return true - }) - sort.Ints(ii) - - if len(ii) == 0 { - return nil, nil - } - - var ( - spans []*dto.BucketSpan - deltas []int64 - prevCount int64 - nextI int - ) - - appendDelta := func(count int64) { - *spans[len(spans)-1].Length++ - deltas = append(deltas, count-prevCount) - prevCount = count - } - - for n, i := range ii { - v, _ := buckets.Load(i) - count := atomic.LoadInt64(v.(*int64)) - // Multiple spans with only small gaps in between are probably - // encoded more efficiently as one larger span with a few empty - // buckets. Needs some research to find the sweet spot. For now, - // we assume that gaps of one ore two buckets should not create - // a new span. - iDelta := int32(i - nextI) - if n == 0 || iDelta > 2 { - // We have to create a new span, either because we are - // at the very beginning, or because we have found a gap - // of more than two buckets. - spans = append(spans, &dto.BucketSpan{ - Offset: proto.Int32(iDelta), - Length: proto.Uint32(0), - }) - } else { - // We have found a small gap (or no gap at all). - // Insert empty buckets as needed. - for j := int32(0); j < iDelta; j++ { - appendDelta(0) - } - } - appendDelta(count) - nextI = i + 1 - } - return spans, deltas -} - -// addToBucket increments the sparse bucket at key by the provided amount. It -// returns true if a new sparse bucket had to be created for that. -func addToBucket(buckets *sync.Map, key int, increment int64) bool { - if existingBucket, ok := buckets.Load(key); ok { - // Fast path without allocation. - atomic.AddInt64(existingBucket.(*int64), increment) - return false - } - // Bucket doesn't exist yet. Slow path allocating new counter. - newBucket := increment // TODO(beorn7): Check if this is sufficient to not let increment escape. - if actualBucket, loaded := buckets.LoadOrStore(key, &newBucket); loaded { - // The bucket was created concurrently in another goroutine. - // Have to increment after all. - atomic.AddInt64(actualBucket.(*int64), increment) - return false - } - return true -} - -// addAndReset returns a function to be used with sync.Map.Range of spare -// buckets in coldCounts. It increments the buckets in the provided hotBuckets -// according to the buckets ranged through. It then resets all buckets ranged -// through to 0 (but leaves them in place so that they don't need to get -// recreated on the next scrape). 
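The drain pattern described above, add each cold bucket into the hot side and then zero the cold counter in place so the entry survives for the next scrape, is generic enough to sketch on its own with plain sync.Map and atomics, independent of the histogram types in this file (the actual helper follows below).

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// drain moves the counts accumulated in cold into hot and resets the cold
// counters to zero without deleting the map entries.
func drain(hot, cold *sync.Map) {
	cold.Range(func(k, v interface{}) bool {
		n := atomic.SwapInt64(v.(*int64), 0) // read and reset the cold bucket
		if n == 0 {
			return true
		}
		if cur, loaded := hot.LoadOrStore(k, &n); loaded {
			atomic.AddInt64(cur.(*int64), n) // bucket already exists on the hot side
		}
		return true
	})
}

func main() {
	var hot, cold sync.Map
	one, two := int64(3), int64(5)
	cold.Store(1, &one)
	cold.Store(2, &two)

	drain(&hot, &cold)

	hot.Range(func(k, v interface{}) bool {
		fmt.Println(k, atomic.LoadInt64(v.(*int64)))
		return true
	})
}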
-func addAndReset(hotBuckets *sync.Map, bucketNumber *uint32) func(k, v interface{}) bool { - return func(k, v interface{}) bool { - bucket := v.(*int64) - if addToBucket(hotBuckets, k.(int), atomic.LoadInt64(bucket)) { - atomic.AddUint32(bucketNumber, 1) - } - atomic.StoreInt64(bucket, 0) - return true - } -} - -func deleteSyncMap(m *sync.Map) { - m.Range(func(k, v interface{}) bool { - m.Delete(k) - return true - }) -} - -func findSmallestKey(m *sync.Map) int { - result := math.MaxInt32 - m.Range(func(k, v interface{}) bool { - key := k.(int) - if key < result { - result = key - } - return true - }) - return result -} - -func getLe(key int, schema int32) float64 { - // Here a bit of context about the behavior for the last bucket counting - // regular numbers (called simply "last bucket" below) and the bucket - // counting observations of ±Inf (called "inf bucket" below, with a key - // one higher than that of the "last bucket"): - // - // If we apply the usual formula to the last bucket, its upper bound - // would be calculated as +Inf. The reason is that the max possible - // regular float64 number (math.MaxFloat64) doesn't coincide with one of - // the calculated bucket boundaries. So the calculated boundary has to - // be larger than math.MaxFloat64, and the only float64 larger than - // math.MaxFloat64 is +Inf. However, we want to count actual - // observations of ±Inf in the inf bucket. Therefore, we have to treat - // the upper bound of the last bucket specially and set it to - // math.MaxFloat64. (The upper bound of the inf bucket, with its key - // being one higher than that of the last bucket, naturally comes out as - // +Inf by the usual formula. So that's fine.) - // - // math.MaxFloat64 has a frac of 0.9999999999999999 and an exp of - // 1024. If there were a float64 number following math.MaxFloat64, it - // would have a frac of 1.0 and an exp of 1024, or equivalently a frac - // of 0.5 and an exp of 1025. However, since frac must be smaller than - // 1, and exp must be smaller than 1025, either representation overflows - // a float64. (Which, in turn, is the reason that math.MaxFloat64 is the - // largest possible float64. Q.E.D.) However, the formula for - // calculating the upper bound from the idx and schema of the last - // bucket results in precisely that. It is either frac=1.0 & exp=1024 - // (for schema < 0) or frac=0.5 & exp=1025 (for schema >=0). (This is, - // by the way, a power of two where the exponent itself is a power of - // two, 2¹⁰ in fact, which coinicides with a bucket boundary in all - // schemas.) So these are the special cases we have to catch below. - if schema < 0 { - exp := key << -schema - if exp == 1024 { - // This is the last bucket before the overflow bucket - // (for ±Inf observations). Return math.MaxFloat64 as - // explained above. - return math.MaxFloat64 - } - return math.Ldexp(1, exp) - } - - fracIdx := key & ((1 << schema) - 1) - frac := nativeHistogramBounds[schema][fracIdx] - exp := (key >> schema) + 1 - if frac == 0.5 && exp == 1025 { - // This is the last bucket before the overflow bucket (for ±Inf - // observations). Return math.MaxFloat64 as explained above. - return math.MaxFloat64 - } - return math.Ldexp(frac, exp) -} - -// waitForCooldown returns after the count field in the provided histogramCounts -// has reached the provided count value. -func waitForCooldown(count uint64, counts *histogramCounts) { - for count != atomic.LoadUint64(&counts.count) { - runtime.Gosched() // Let observations get work done. 
- } -} - -// atomicAddFloat adds the provided float atomically to another float -// represented by the bit pattern the bits pointer is pointing to. -func atomicAddFloat(bits *uint64, v float64) { - for { - loadedBits := atomic.LoadUint64(bits) - newBits := math.Float64bits(math.Float64frombits(loadedBits) + v) - if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) { - break - } - } -} - -// atomicDecUint32 atomically decrements the uint32 p points to. See -// https://pkg.go.dev/sync/atomic#AddUint32 to understand how this is done. -func atomicDecUint32(p *uint32) { - atomic.AddUint32(p, ^uint32(0)) -} - -// addAndResetCounts adds certain fields (count, sum, conventional buckets, zero -// bucket) from the cold counts to the corresponding fields in the hot -// counts. Those fields are then reset to 0 in the cold counts. -func addAndResetCounts(hot, cold *histogramCounts) { - atomic.AddUint64(&hot.count, atomic.LoadUint64(&cold.count)) - atomic.StoreUint64(&cold.count, 0) - coldSum := math.Float64frombits(atomic.LoadUint64(&cold.sumBits)) - atomicAddFloat(&hot.sumBits, coldSum) - atomic.StoreUint64(&cold.sumBits, 0) - for i := range hot.buckets { - atomic.AddUint64(&hot.buckets[i], atomic.LoadUint64(&cold.buckets[i])) - atomic.StoreUint64(&cold.buckets[i], 0) - } - atomic.AddUint64(&hot.nativeHistogramZeroBucket, atomic.LoadUint64(&cold.nativeHistogramZeroBucket)) - atomic.StoreUint64(&cold.nativeHistogramZeroBucket, 0) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go deleted file mode 100644 index 1ed5abe74..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (c) 2015 Björn Rabenstein -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. -// -// The code in this package is copy/paste to avoid a dependency. Hence this file -// carries the copyright of the original repo. -// https://github.com/beorn7/floats -package internal - -import ( - "math" -) - -// minNormalFloat64 is the smallest positive normal value of type float64. -var minNormalFloat64 = math.Float64frombits(0x0010000000000000) - -// AlmostEqualFloat64 returns true if a and b are equal within a relative error -// of epsilon. See http://floating-point-gui.de/errors/comparison/ for the -// details of the applied method. 
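Relative-error comparison of the kind described above behaves quite differently from a fixed absolute epsilon once magnitudes vary. A tiny standalone illustration in the same spirit, not the vendored helper itself (its denormal-range special case is omitted for brevity):

package main

import (
	"fmt"
	"math"
)

// almostEqual reports whether a and b agree within relative error epsilon.
func almostEqual(a, b, epsilon float64) bool {
	if a == b {
		return true
	}
	diff := math.Abs(a - b)
	return diff/math.Min(math.Abs(a)+math.Abs(b), math.MaxFloat64) < epsilon
}

func main() {
	fmt.Println(almostEqual(0.1+0.2, 0.3, 1e-12))    // true: only rounding error apart
	fmt.Println(almostEqual(1e20, 1e20+1e10, 1e-12)) // false: relative error ~5e-11, above epsilon
}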
-func AlmostEqualFloat64(a, b, epsilon float64) bool { - if a == b { - return true - } - absA := math.Abs(a) - absB := math.Abs(b) - diff := math.Abs(a - b) - if a == 0 || b == 0 || absA+absB < minNormalFloat64 { - return diff < epsilon*minNormalFloat64 - } - return diff/math.Min(absA+absB, math.MaxFloat64) < epsilon -} - -// AlmostEqualFloat64s is the slice form of AlmostEqualFloat64. -func AlmostEqualFloat64s(a, b []float64, epsilon float64) bool { - if len(a) != len(b) { - return false - } - for i := range a { - if !AlmostEqualFloat64(a[i], b[i], epsilon) { - return false - } - } - return true -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go deleted file mode 100644 index fd0750f2c..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go +++ /dev/null @@ -1,654 +0,0 @@ -// Copyright 2022 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// It provides tools to compare sequences of strings and generate textual diffs. -// -// Maintaining `GetUnifiedDiffString` here because original repository -// (https://github.com/pmezard/go-difflib) is no loger maintained. -package internal - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strings" -) - -func min(a, b int) int { - if a < b { - return a - } - return b -} - -func max(a, b int) int { - if a > b { - return a - } - return b -} - -func calculateRatio(matches, length int) float64 { - if length > 0 { - return 2.0 * float64(matches) / float64(length) - } - return 1.0 -} - -type Match struct { - A int - B int - Size int -} - -type OpCode struct { - Tag byte - I1 int - I2 int - J1 int - J2 int -} - -// SequenceMatcher compares sequence of strings. The basic -// algorithm predates, and is a little fancier than, an algorithm -// published in the late 1980's by Ratcliff and Obershelp under the -// hyperbolic name "gestalt pattern matching". The basic idea is to find -// the longest contiguous matching subsequence that contains no "junk" -// elements (R-O doesn't address junk). The same idea is then applied -// recursively to the pieces of the sequences to the left and to the right -// of the matching subsequence. This does not yield minimal edit -// sequences, but does tend to yield matches that "look right" to people. -// -// SequenceMatcher tries to compute a "human-friendly diff" between two -// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the -// longest *contiguous* & junk-free matching subsequence. That's what -// catches peoples' eyes. The Windows(tm) windiff has another interesting -// notion, pairing up elements that appear uniquely in each sequence. -// That, and the method here, appear to yield more intuitive difference -// reports than does diff. This method appears to be the least vulnerable -// to synching up on blocks of "junk lines", though (like blank lines in -// ordinary text files, or maybe "
<P>
" lines in HTML files). That may be -// because this is the only method of the 3 that has a *concept* of -// "junk" . -// -// Timing: Basic R-O is cubic time worst case and quadratic time expected -// case. SequenceMatcher is quadratic time for the worst case and has -// expected-case behavior dependent in a complicated way on how many -// elements the sequences have in common; best case time is linear. -type SequenceMatcher struct { - a []string - b []string - b2j map[string][]int - IsJunk func(string) bool - autoJunk bool - bJunk map[string]struct{} - matchingBlocks []Match - fullBCount map[string]int - bPopular map[string]struct{} - opCodes []OpCode -} - -func NewMatcher(a, b []string) *SequenceMatcher { - m := SequenceMatcher{autoJunk: true} - m.SetSeqs(a, b) - return &m -} - -func NewMatcherWithJunk(a, b []string, autoJunk bool, - isJunk func(string) bool, -) *SequenceMatcher { - m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk} - m.SetSeqs(a, b) - return &m -} - -// Set two sequences to be compared. -func (m *SequenceMatcher) SetSeqs(a, b []string) { - m.SetSeq1(a) - m.SetSeq2(b) -} - -// Set the first sequence to be compared. The second sequence to be compared is -// not changed. -// -// SequenceMatcher computes and caches detailed information about the second -// sequence, so if you want to compare one sequence S against many sequences, -// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other -// sequences. -// -// See also SetSeqs() and SetSeq2(). -func (m *SequenceMatcher) SetSeq1(a []string) { - if &a == &m.a { - return - } - m.a = a - m.matchingBlocks = nil - m.opCodes = nil -} - -// Set the second sequence to be compared. The first sequence to be compared is -// not changed. -func (m *SequenceMatcher) SetSeq2(b []string) { - if &b == &m.b { - return - } - m.b = b - m.matchingBlocks = nil - m.opCodes = nil - m.fullBCount = nil - m.chainB() -} - -func (m *SequenceMatcher) chainB() { - // Populate line -> index mapping - b2j := map[string][]int{} - for i, s := range m.b { - indices := b2j[s] - indices = append(indices, i) - b2j[s] = indices - } - - // Purge junk elements - m.bJunk = map[string]struct{}{} - if m.IsJunk != nil { - junk := m.bJunk - for s := range b2j { - if m.IsJunk(s) { - junk[s] = struct{}{} - } - } - for s := range junk { - delete(b2j, s) - } - } - - // Purge remaining popular elements - popular := map[string]struct{}{} - n := len(m.b) - if m.autoJunk && n >= 200 { - ntest := n/100 + 1 - for s, indices := range b2j { - if len(indices) > ntest { - popular[s] = struct{}{} - } - } - for s := range popular { - delete(b2j, s) - } - } - m.bPopular = popular - m.b2j = b2j -} - -func (m *SequenceMatcher) isBJunk(s string) bool { - _, ok := m.bJunk[s] - return ok -} - -// Find longest matching block in a[alo:ahi] and b[blo:bhi]. -// -// If IsJunk is not defined: -// -// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where -// -// alo <= i <= i+k <= ahi -// blo <= j <= j+k <= bhi -// -// and for all (i',j',k') meeting those conditions, -// -// k >= k' -// i <= i' -// and if i == i', j <= j' -// -// In other words, of all maximal matching blocks, return one that -// starts earliest in a, and of all those maximal matching blocks that -// start earliest in a, return the one that starts earliest in b. -// -// If IsJunk is defined, first the longest matching block is -// determined as above, but with the additional restriction that no -// junk element appears in the block. 
Then that block is extended as -// far as possible by matching (only) junk elements on both sides. So -// the resulting block never matches on junk except as identical junk -// happens to be adjacent to an "interesting" match. -// -// If no blocks match, return (alo, blo, 0). -func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match { - // CAUTION: stripping common prefix or suffix would be incorrect. - // E.g., - // ab - // acab - // Longest matching block is "ab", but if common prefix is - // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so - // strip, so ends up claiming that ab is changed to acab by - // inserting "ca" in the middle. That's minimal but unintuitive: - // "it's obvious" that someone inserted "ac" at the front. - // Windiff ends up at the same place as diff, but by pairing up - // the unique 'b's and then matching the first two 'a's. - besti, bestj, bestsize := alo, blo, 0 - - // find longest junk-free match - // during an iteration of the loop, j2len[j] = length of longest - // junk-free match ending with a[i-1] and b[j] - j2len := map[int]int{} - for i := alo; i != ahi; i++ { - // look at all instances of a[i] in b; note that because - // b2j has no junk keys, the loop is skipped if a[i] is junk - newj2len := map[int]int{} - for _, j := range m.b2j[m.a[i]] { - // a[i] matches b[j] - if j < blo { - continue - } - if j >= bhi { - break - } - k := j2len[j-1] + 1 - newj2len[j] = k - if k > bestsize { - besti, bestj, bestsize = i-k+1, j-k+1, k - } - } - j2len = newj2len - } - - // Extend the best by non-junk elements on each end. In particular, - // "popular" non-junk elements aren't in b2j, which greatly speeds - // the inner loop above, but also means "the best" match so far - // doesn't contain any junk *or* popular non-junk elements. - for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) && - m.a[besti-1] == m.b[bestj-1] { - besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 - } - for besti+bestsize < ahi && bestj+bestsize < bhi && - !m.isBJunk(m.b[bestj+bestsize]) && - m.a[besti+bestsize] == m.b[bestj+bestsize] { - bestsize++ - } - - // Now that we have a wholly interesting match (albeit possibly - // empty!), we may as well suck up the matching junk on each - // side of it too. Can't think of a good reason not to, and it - // saves post-processing the (possibly considerable) expense of - // figuring out what to do with it. In the case of an empty - // interesting match, this is clearly the right thing to do, - // because no other kind of match is possible in the regions. - for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) && - m.a[besti-1] == m.b[bestj-1] { - besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 - } - for besti+bestsize < ahi && bestj+bestsize < bhi && - m.isBJunk(m.b[bestj+bestsize]) && - m.a[besti+bestsize] == m.b[bestj+bestsize] { - bestsize++ - } - - return Match{A: besti, B: bestj, Size: bestsize} -} - -// Return list of triples describing matching subsequences. -// -// Each triple is of the form (i, j, n), and means that -// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in -// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are -// adjacent triples in the list, and the second is not the last triple in the -// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe -// adjacent equal blocks. -// -// The last triple is a dummy, (len(a), len(b), 0), and is the only -// triple with n==0. 
-func (m *SequenceMatcher) GetMatchingBlocks() []Match { - if m.matchingBlocks != nil { - return m.matchingBlocks - } - - var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match - matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match { - match := m.findLongestMatch(alo, ahi, blo, bhi) - i, j, k := match.A, match.B, match.Size - if match.Size > 0 { - if alo < i && blo < j { - matched = matchBlocks(alo, i, blo, j, matched) - } - matched = append(matched, match) - if i+k < ahi && j+k < bhi { - matched = matchBlocks(i+k, ahi, j+k, bhi, matched) - } - } - return matched - } - matched := matchBlocks(0, len(m.a), 0, len(m.b), nil) - - // It's possible that we have adjacent equal blocks in the - // matching_blocks list now. - nonAdjacent := []Match{} - i1, j1, k1 := 0, 0, 0 - for _, b := range matched { - // Is this block adjacent to i1, j1, k1? - i2, j2, k2 := b.A, b.B, b.Size - if i1+k1 == i2 && j1+k1 == j2 { - // Yes, so collapse them -- this just increases the length of - // the first block by the length of the second, and the first - // block so lengthened remains the block to compare against. - k1 += k2 - } else { - // Not adjacent. Remember the first block (k1==0 means it's - // the dummy we started with), and make the second block the - // new block to compare against. - if k1 > 0 { - nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) - } - i1, j1, k1 = i2, j2, k2 - } - } - if k1 > 0 { - nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) - } - - nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0}) - m.matchingBlocks = nonAdjacent - return m.matchingBlocks -} - -// Return list of 5-tuples describing how to turn a into b. -// -// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple -// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the -// tuple preceding it, and likewise for j1 == the previous j2. -// -// The tags are characters, with these meanings: -// -// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2] -// -// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case. -// -// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case. -// -// 'e' (equal): a[i1:i2] == b[j1:j2] -func (m *SequenceMatcher) GetOpCodes() []OpCode { - if m.opCodes != nil { - return m.opCodes - } - i, j := 0, 0 - matching := m.GetMatchingBlocks() - opCodes := make([]OpCode, 0, len(matching)) - for _, m := range matching { - // invariant: we've pumped out correct diffs to change - // a[:i] into b[:j], and the next matching block is - // a[ai:ai+size] == b[bj:bj+size]. So we need to pump - // out a diff to change a[i:ai] into b[j:bj], pump out - // the matching block, and move (i,j) beyond the match - ai, bj, size := m.A, m.B, m.Size - tag := byte(0) - if i < ai && j < bj { - tag = 'r' - } else if i < ai { - tag = 'd' - } else if j < bj { - tag = 'i' - } - if tag > 0 { - opCodes = append(opCodes, OpCode{tag, i, ai, j, bj}) - } - i, j = ai+size, bj+size - // the list of matching blocks is terminated by a - // sentinel with size 0 - if size > 0 { - opCodes = append(opCodes, OpCode{'e', ai, i, bj, j}) - } - } - m.opCodes = opCodes - return m.opCodes -} - -// Isolate change clusters by eliminating ranges with no changes. -// -// Return a generator of groups with up to n lines of context. -// Each group is in the same format as returned by GetOpCodes(). 
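Each group returned by GetGroupedOpCodes (implementation below) uses the same 5-tuples as GetOpCodes, so callers typically end up with a switch on the tag either way. A sketch of that walk; this is an internal package, so the sketch assumes the code were copied or otherwise importable rather than a supported API.

package internal

import "fmt"

// describeDiff prints one line per opcode describing how to turn a into b,
// using the tags documented for GetOpCodes.
func describeDiff(a, b []string) {
	m := NewMatcher(a, b)
	for _, op := range m.GetOpCodes() {
		switch op.Tag {
		case 'e':
			fmt.Printf("keep    a[%d:%d]\n", op.I1, op.I2)
		case 'r':
			fmt.Printf("replace a[%d:%d] with b[%d:%d]\n", op.I1, op.I2, op.J1, op.J2)
		case 'd':
			fmt.Printf("delete  a[%d:%d]\n", op.I1, op.I2)
		case 'i':
			fmt.Printf("insert  b[%d:%d] at a[%d]\n", op.J1, op.J2, op.I1)
		}
	}
}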
-func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { - if n < 0 { - n = 3 - } - codes := m.GetOpCodes() - if len(codes) == 0 { - codes = []OpCode{{'e', 0, 1, 0, 1}} - } - // Fixup leading and trailing groups if they show no changes. - if codes[0].Tag == 'e' { - c := codes[0] - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2} - } - if codes[len(codes)-1].Tag == 'e' { - c := codes[len(codes)-1] - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)} - } - nn := n + n - groups := [][]OpCode{} - group := []OpCode{} - for _, c := range codes { - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - // End the current group and start a new one whenever - // there is a large range with no changes. - if c.Tag == 'e' && i2-i1 > nn { - group = append(group, OpCode{ - c.Tag, i1, min(i2, i1+n), - j1, min(j2, j1+n), - }) - groups = append(groups, group) - group = []OpCode{} - i1, j1 = max(i1, i2-n), max(j1, j2-n) - } - group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) - } - if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') { - groups = append(groups, group) - } - return groups -} - -// Return a measure of the sequences' similarity (float in [0,1]). -// -// Where T is the total number of elements in both sequences, and -// M is the number of matches, this is 2.0*M / T. -// Note that this is 1 if the sequences are identical, and 0 if -// they have nothing in common. -// -// .Ratio() is expensive to compute if you haven't already computed -// .GetMatchingBlocks() or .GetOpCodes(), in which case you may -// want to try .QuickRatio() or .RealQuickRation() first to get an -// upper bound. -func (m *SequenceMatcher) Ratio() float64 { - matches := 0 - for _, m := range m.GetMatchingBlocks() { - matches += m.Size - } - return calculateRatio(matches, len(m.a)+len(m.b)) -} - -// Return an upper bound on ratio() relatively quickly. -// -// This isn't defined beyond that it is an upper bound on .Ratio(), and -// is faster to compute. -func (m *SequenceMatcher) QuickRatio() float64 { - // viewing a and b as multisets, set matches to the cardinality - // of their intersection; this counts the number of matches - // without regard to order, so is clearly an upper bound - if m.fullBCount == nil { - m.fullBCount = map[string]int{} - for _, s := range m.b { - m.fullBCount[s]++ - } - } - - // avail[x] is the number of times x appears in 'b' less the - // number of times we've seen it in 'a' so far ... kinda - avail := map[string]int{} - matches := 0 - for _, s := range m.a { - n, ok := avail[s] - if !ok { - n = m.fullBCount[s] - } - avail[s] = n - 1 - if n > 0 { - matches++ - } - } - return calculateRatio(matches, len(m.a)+len(m.b)) -} - -// Return an upper bound on ratio() very quickly. -// -// This isn't defined beyond that it is an upper bound on .Ratio(), and -// is faster to compute than either .Ratio() or .QuickRatio(). 
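Because Ratio requires the full matching-block computation while QuickRatio and RealQuickRatio only provide successively cheaper upper bounds, a common pattern (the one the comments above suggest) is to test the cheap bounds first and fall through to Ratio only when they clear the cutoff. A sketch under the same internal-package caveat:

package internal

// similarEnough reports whether a and b reach the cutoff similarity,
// checking the cheap upper bounds before paying for the full Ratio.
func similarEnough(a, b []string, cutoff float64) bool {
	m := NewMatcher(a, b)
	return m.RealQuickRatio() >= cutoff &&
		m.QuickRatio() >= cutoff &&
		m.Ratio() >= cutoff
}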
-func (m *SequenceMatcher) RealQuickRatio() float64 { - la, lb := len(m.a), len(m.b) - return calculateRatio(min(la, lb), la+lb) -} - -// Convert range to the "ed" format -func formatRangeUnified(start, stop int) string { - // Per the diff spec at http://www.unix.org/single_unix_specification/ - beginning := start + 1 // lines start numbering with one - length := stop - start - if length == 1 { - return fmt.Sprintf("%d", beginning) - } - if length == 0 { - beginning-- // empty ranges begin at line just before the range - } - return fmt.Sprintf("%d,%d", beginning, length) -} - -// Unified diff parameters -type UnifiedDiff struct { - A []string // First sequence lines - FromFile string // First file name - FromDate string // First file time - B []string // Second sequence lines - ToFile string // Second file name - ToDate string // Second file time - Eol string // Headers end of line, defaults to LF - Context int // Number of context lines -} - -// Compare two sequences of lines; generate the delta as a unified diff. -// -// Unified diffs are a compact way of showing line changes and a few -// lines of context. The number of context lines is set by 'n' which -// defaults to three. -// -// By default, the diff control lines (those with ---, +++, or @@) are -// created with a trailing newline. This is helpful so that inputs -// created from file.readlines() result in diffs that are suitable for -// file.writelines() since both the inputs and outputs have trailing -// newlines. -// -// For inputs that do not have trailing newlines, set the lineterm -// argument to "" so that the output will be uniformly newline free. -// -// The unidiff format normally has a header for filenames and modification -// times. Any or all of these may be specified using strings for -// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. -// The modification times are normally expressed in the ISO 8601 format. 
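Putting the pieces together, producing a unified diff from two multi-line strings amounts to splitting them with SplitLines, which keeps the trailing newlines this writer expects, and filling in UnifiedDiff. Again a sketch against this internal copy, with invented file names.

package internal

import "fmt"

func exampleUnifiedDiff() {
	before := "registered collectors: 3\nscrapes: 10\n"
	after := "registered collectors: 4\nscrapes: 10\n"

	diff := UnifiedDiff{
		A:        SplitLines(before),
		B:        SplitLines(after),
		FromFile: "want.txt", // hypothetical file names
		ToFile:   "got.txt",
		Context:  3,
	}
	out, err := GetUnifiedDiffString(diff)
	if err != nil {
		panic(err)
	}
	fmt.Print(out)
}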
-func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { - buf := bufio.NewWriter(writer) - defer buf.Flush() - wf := func(format string, args ...interface{}) error { - _, err := buf.WriteString(fmt.Sprintf(format, args...)) - return err - } - ws := func(s string) error { - _, err := buf.WriteString(s) - return err - } - - if len(diff.Eol) == 0 { - diff.Eol = "\n" - } - - started := false - m := NewMatcher(diff.A, diff.B) - for _, g := range m.GetGroupedOpCodes(diff.Context) { - if !started { - started = true - fromDate := "" - if len(diff.FromDate) > 0 { - fromDate = "\t" + diff.FromDate - } - toDate := "" - if len(diff.ToDate) > 0 { - toDate = "\t" + diff.ToDate - } - if diff.FromFile != "" || diff.ToFile != "" { - err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol) - if err != nil { - return err - } - err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol) - if err != nil { - return err - } - } - } - first, last := g[0], g[len(g)-1] - range1 := formatRangeUnified(first.I1, last.I2) - range2 := formatRangeUnified(first.J1, last.J2) - if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil { - return err - } - for _, c := range g { - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - if c.Tag == 'e' { - for _, line := range diff.A[i1:i2] { - if err := ws(" " + line); err != nil { - return err - } - } - continue - } - if c.Tag == 'r' || c.Tag == 'd' { - for _, line := range diff.A[i1:i2] { - if err := ws("-" + line); err != nil { - return err - } - } - } - if c.Tag == 'r' || c.Tag == 'i' { - for _, line := range diff.B[j1:j2] { - if err := ws("+" + line); err != nil { - return err - } - } - } - } - } - return nil -} - -// Like WriteUnifiedDiff but returns the diff a string. -func GetUnifiedDiffString(diff UnifiedDiff) (string, error) { - w := &bytes.Buffer{} - err := WriteUnifiedDiff(w, diff) - return w.String(), err -} - -// Split a string on "\n" while preserving them. The output can be used -// as input for UnifiedDiff and ContextDiff structures. -func SplitLines(s string) []string { - lines := strings.SplitAfter(s, "\n") - lines[len(lines)-1] += "\n" - return lines -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go deleted file mode 100644 index 723b45d64..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -import "regexp" - -type GoCollectorRule struct { - Matcher *regexp.Regexp - Deny bool -} - -// GoCollectorOptions should not be used be directly by anything, except `collectors` package. -// Use it via collectors package instead. See issue -// https://github.com/prometheus/client_golang/issues/1030. 
-// -// This is internal, so external users only can use it via `collector.WithGoCollector*` methods -type GoCollectorOptions struct { - DisableMemStatsLikeMetrics bool - RuntimeMetricSumForHist map[string]string - RuntimeMetricRules []GoCollectorRule -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go deleted file mode 100644 index 97d17d6cb..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build go1.17 -// +build go1.17 - -package internal - -import ( - "math" - "path" - "runtime/metrics" - "strings" - - "github.com/prometheus/common/model" -) - -// RuntimeMetricsToProm produces a Prometheus metric name from a runtime/metrics -// metric description and validates whether the metric is suitable for integration -// with Prometheus. -// -// Returns false if a name could not be produced, or if Prometheus does not understand -// the runtime/metrics Kind. -// -// Note that the main reason a name couldn't be produced is if the runtime/metrics -// package exports a name with characters outside the valid Prometheus metric name -// character set. This is theoretically possible, but should never happen in practice. -// Still, don't rely on it. -func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) { - namespace := "go" - - comp := strings.SplitN(d.Name, ":", 2) - key := comp[0] - unit := comp[1] - - // The last path element in the key is the name, - // the rest is the subsystem. - subsystem := path.Dir(key[1:] /* remove leading / */) - name := path.Base(key) - - // subsystem is translated by replacing all / and - with _. - subsystem = strings.ReplaceAll(subsystem, "/", "_") - subsystem = strings.ReplaceAll(subsystem, "-", "_") - - // unit is translated assuming that the unit contains no - // non-ASCII characters. - unit = strings.ReplaceAll(unit, "-", "_") - unit = strings.ReplaceAll(unit, "*", "_") - unit = strings.ReplaceAll(unit, "/", "_per_") - - // name has - replaced with _ and is concatenated with the unit and - // other data. - name = strings.ReplaceAll(name, "-", "_") - name += "_" + unit - if d.Cumulative && d.Kind != metrics.KindFloat64Histogram { - name += "_total" - } - - valid := model.IsValidMetricName(model.LabelValue(namespace + "_" + subsystem + "_" + name)) - switch d.Kind { - case metrics.KindUint64: - case metrics.KindFloat64: - case metrics.KindFloat64Histogram: - default: - valid = false - } - return namespace, subsystem, name, valid -} - -// RuntimeMetricsBucketsForUnit takes a set of buckets obtained for a runtime/metrics histogram -// type (so, lower-bound inclusive) and a unit from a runtime/metrics name, and produces -// a reduced set of buckets. 
This function always removes any -Inf bucket as it's represented -// as the bottom-most upper-bound inclusive bucket in Prometheus. -func RuntimeMetricsBucketsForUnit(buckets []float64, unit string) []float64 { - switch unit { - case "bytes": - // Re-bucket as powers of 2. - return reBucketExp(buckets, 2) - case "seconds": - // Re-bucket as powers of 10 and then merge all buckets greater - // than 1 second into the +Inf bucket. - b := reBucketExp(buckets, 10) - for i := range b { - if b[i] <= 1 { - continue - } - b[i] = math.Inf(1) - b = b[:i+1] - break - } - return b - } - return buckets -} - -// reBucketExp takes a list of bucket boundaries (lower bound inclusive) and -// downsamples the buckets to those a multiple of base apart. The end result -// is a roughly exponential (in many cases, perfectly exponential) bucketing -// scheme. -func reBucketExp(buckets []float64, base float64) []float64 { - bucket := buckets[0] - var newBuckets []float64 - // We may see a -Inf here, in which case, add it and skip it - // since we risk producing NaNs otherwise. - // - // We need to preserve -Inf values to maintain runtime/metrics - // conventions. We'll strip it out later. - if bucket == math.Inf(-1) { - newBuckets = append(newBuckets, bucket) - buckets = buckets[1:] - bucket = buckets[0] - } - // From now on, bucket should always have a non-Inf value because - // Infs are only ever at the ends of the bucket lists, so - // arithmetic operations on it are non-NaN. - for i := 1; i < len(buckets); i++ { - if bucket >= 0 && buckets[i] < bucket*base { - // The next bucket we want to include is at least bucket*base. - continue - } else if bucket < 0 && buckets[i] < bucket/base { - // In this case the bucket we're targeting is negative, and since - // we're ascending through buckets here, we need to divide to get - // closer to zero exponentially. - continue - } - // The +Inf bucket will always be the last one, and we'll always - // end up including it here because bucket - newBuckets = append(newBuckets, bucket) - bucket = buckets[i] - } - return append(newBuckets, bucket) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go deleted file mode 100644 index 6515c1148..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -import ( - "sort" - - dto "github.com/prometheus/client_model/go" -) - -// LabelPairSorter implements sort.Interface. It is used to sort a slice of -// dto.LabelPair pointers. -type LabelPairSorter []*dto.LabelPair - -func (s LabelPairSorter) Len() int { - return len(s) -} - -func (s LabelPairSorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s LabelPairSorter) Less(i, j int) bool { - return s[i].GetName() < s[j].GetName() -} - -// MetricSorter is a sortable slice of *dto.Metric. 
-type MetricSorter []*dto.Metric - -func (s MetricSorter) Len() int { - return len(s) -} - -func (s MetricSorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s MetricSorter) Less(i, j int) bool { - if len(s[i].Label) != len(s[j].Label) { - // This should not happen. The metrics are - // inconsistent. However, we have to deal with the fact, as - // people might use custom collectors or metric family injection - // to create inconsistent metrics. So let's simply compare the - // number of labels in this case. That will still yield - // reproducible sorting. - return len(s[i].Label) < len(s[j].Label) - } - for n, lp := range s[i].Label { - vi := lp.GetValue() - vj := s[j].Label[n].GetValue() - if vi != vj { - return vi < vj - } - } - - // We should never arrive here. Multiple metrics with the same - // label set in the same scrape will lead to undefined ingestion - // behavior. However, as above, we have to provide stable sorting - // here, even for inconsistent metrics. So sort equal metrics - // by their timestamp, with missing timestamps (implying "now") - // coming last. - if s[i].TimestampMs == nil { - return false - } - if s[j].TimestampMs == nil { - return true - } - return s[i].GetTimestampMs() < s[j].GetTimestampMs() -} - -// NormalizeMetricFamilies returns a MetricFamily slice with empty -// MetricFamilies pruned and the remaining MetricFamilies sorted by name within -// the slice, with the contained Metrics sorted within each MetricFamily. -func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily { - for _, mf := range metricFamiliesByName { - sort.Sort(MetricSorter(mf.Metric)) - } - names := make([]string, 0, len(metricFamiliesByName)) - for name, mf := range metricFamiliesByName { - if len(mf.Metric) > 0 { - names = append(names, name) - } - } - sort.Strings(names) - result := make([]*dto.MetricFamily, 0, len(names)) - for _, name := range names { - result = append(result, metricFamiliesByName[name]) - } - return result -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go deleted file mode 100644 index c1b8fad36..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "errors" - "fmt" - "strings" - "unicode/utf8" - - "github.com/prometheus/common/model" -) - -// Labels represents a collection of label name -> value mappings. This type is -// commonly used with the With(Labels) and GetMetricWith(Labels) methods of -// metric vector Collectors, e.g.: -// -// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) -// -// The other use-case is the specification of constant label pairs in Opts or to -// create a Desc. -type Labels map[string]string - -// reservedLabelPrefix is a prefix which is not legal in user-supplied -// label names. 
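The reserved "__" prefix mentioned above is why user-supplied label names such as "__name__" are rejected even though they are syntactically valid. A tiny illustration against the package-private checkLabelName helper defined further below; the sample names are made up.

package prometheus

import "fmt"

func exampleCheckLabelName() {
	fmt.Println(checkLabelName("method"))    // true: valid and not reserved
	fmt.Println(checkLabelName("__name__"))  // false: the "__" prefix is reserved
	fmt.Println(checkLabelName("1st_label")) // false: must not start with a digit
}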
-const reservedLabelPrefix = "__" - -var errInconsistentCardinality = errors.New("inconsistent label cardinality") - -func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error { - return fmt.Errorf( - "%w: %q has %d variable labels named %q but %d values %q were provided", - errInconsistentCardinality, fqName, - len(labels), labels, - len(labelValues), labelValues, - ) -} - -func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { - if len(labels) != expectedNumberOfValues { - return fmt.Errorf( - "%w: expected %d label values but got %d in %#v", - errInconsistentCardinality, expectedNumberOfValues, - len(labels), labels, - ) - } - - for name, val := range labels { - if !utf8.ValidString(val) { - return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val) - } - } - - return nil -} - -func validateLabelValues(vals []string, expectedNumberOfValues int) error { - if len(vals) != expectedNumberOfValues { - return fmt.Errorf( - "%w: expected %d label values but got %d in %#v", - errInconsistentCardinality, expectedNumberOfValues, - len(vals), vals, - ) - } - - for _, val := range vals { - if !utf8.ValidString(val) { - return fmt.Errorf("label value %q is not valid UTF-8", val) - } - } - - return nil -} - -func checkLabelName(l string) bool { - return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go deleted file mode 100644 index b5119c504..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ /dev/null @@ -1,256 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "errors" - "math" - "sort" - "strings" - "time" - - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - "github.com/prometheus/common/model" - - dto "github.com/prometheus/client_model/go" -) - -var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash. - -// A Metric models a single sample value with its meta data being exported to -// Prometheus. Implementations of Metric in this package are Gauge, Counter, -// Histogram, Summary, and Untyped. -type Metric interface { - // Desc returns the descriptor for the Metric. This method idempotently - // returns the same descriptor throughout the lifetime of the - // Metric. The returned descriptor is immutable by contract. A Metric - // unable to describe itself must return an invalid descriptor (created - // with NewInvalidDesc). - Desc() *Desc - // Write encodes the Metric into a "Metric" Protocol Buffer data - // transmission object. 
- // - // Metric implementations must observe concurrency safety as reads of - // this metric may occur at any time, and any blocking occurs at the - // expense of total performance of rendering all registered - // metrics. Ideally, Metric implementations should support concurrent - // readers. - // - // While populating dto.Metric, it is the responsibility of the - // implementation to ensure validity of the Metric protobuf (like valid - // UTF-8 strings or syntactically valid metric and label names). It is - // recommended to sort labels lexicographically. Callers of Write should - // still make sure of sorting if they depend on it. - Write(*dto.Metric) error - // TODO(beorn7): The original rationale of passing in a pre-allocated - // dto.Metric protobuf to save allocations has disappeared. The - // signature of this method should be changed to "Write() (*dto.Metric, - // error)". -} - -// Opts bundles the options for creating most Metric types. Each metric -// implementation XXX has its own XXXOpts type, but in most cases, it is just -// an alias of this type (which might change when the requirement arises.) -// -// It is mandatory to set Name to a non-empty string. All other fields are -// optional and can safely be left at their zero value, although it is strongly -// encouraged to set a Help string. -type Opts struct { - // Namespace, Subsystem, and Name are components of the fully-qualified - // name of the Metric (created by joining these components with - // "_"). Only Name is mandatory, the others merely help structuring the - // name. Note that the fully-qualified name of the metric must be a - // valid Prometheus metric name. - Namespace string - Subsystem string - Name string - - // Help provides information about this metric. - // - // Metrics with the same fully-qualified name must have the same Help - // string. - Help string - - // ConstLabels are used to attach fixed labels to this metric. Metrics - // with the same fully-qualified name must have the same label names in - // their ConstLabels. - // - // ConstLabels are only used rarely. In particular, do not use them to - // attach the same labels to all your metrics. Those use cases are - // better covered by target labels set by the scraping Prometheus - // server, or by one specific metric (e.g. a build_info or a - // machine_role metric). See also - // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels - ConstLabels Labels -} - -// BuildFQName joins the given three name components by "_". Empty name -// components are ignored. If the name parameter itself is empty, an empty -// string is returned, no matter what. Metric implementations included in this -// library use this function internally to generate the fully-qualified metric -// name from the name component in their Opts. Users of the library will only -// need this function if they implement their own Metric or instantiate a Desc -// (with NewDesc) directly. -func BuildFQName(namespace, subsystem, name string) string { - if name == "" { - return "" - } - switch { - case namespace != "" && subsystem != "": - return strings.Join([]string{namespace, subsystem, name}, "_") - case namespace != "": - return strings.Join([]string{namespace, name}, "_") - case subsystem != "": - return strings.Join([]string{subsystem, name}, "_") - } - return name -} - -type invalidMetric struct { - desc *Desc - err error -} - -// NewInvalidMetric returns a metric whose Write method always returns the -// provided error. 
It is useful if a Collector finds itself unable to collect -// a metric and wishes to report an error to the registry. -func NewInvalidMetric(desc *Desc, err error) Metric { - return &invalidMetric{desc, err} -} - -func (m *invalidMetric) Desc() *Desc { return m.desc } - -func (m *invalidMetric) Write(*dto.Metric) error { return m.err } - -type timestampedMetric struct { - Metric - t time.Time -} - -func (m timestampedMetric) Write(pb *dto.Metric) error { - e := m.Metric.Write(pb) - pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000)) - return e -} - -// NewMetricWithTimestamp returns a new Metric wrapping the provided Metric in a -// way that it has an explicit timestamp set to the provided Time. This is only -// useful in rare cases as the timestamp of a Prometheus metric should usually -// be set by the Prometheus server during scraping. Exceptions include mirroring -// metrics with given timestamps from other metric -// sources. -// -// NewMetricWithTimestamp works best with MustNewConstMetric, -// MustNewConstHistogram, and MustNewConstSummary, see example. -// -// Currently, the exposition formats used by Prometheus are limited to -// millisecond resolution. Thus, the provided time will be rounded down to the -// next full millisecond value. -func NewMetricWithTimestamp(t time.Time, m Metric) Metric { - return timestampedMetric{Metric: m, t: t} -} - -type withExemplarsMetric struct { - Metric - - exemplars []*dto.Exemplar -} - -func (m *withExemplarsMetric) Write(pb *dto.Metric) error { - if err := m.Metric.Write(pb); err != nil { - return err - } - - switch { - case pb.Counter != nil: - pb.Counter.Exemplar = m.exemplars[len(m.exemplars)-1] - case pb.Histogram != nil: - for _, e := range m.exemplars { - // pb.Histogram.Bucket are sorted by UpperBound. - i := sort.Search(len(pb.Histogram.Bucket), func(i int) bool { - return pb.Histogram.Bucket[i].GetUpperBound() >= e.GetValue() - }) - if i < len(pb.Histogram.Bucket) { - pb.Histogram.Bucket[i].Exemplar = e - } else { - // The +Inf bucket should be explicitly added if there is an exemplar for it, similar to non-const histogram logic in https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go#L357-L365. - b := &dto.Bucket{ - CumulativeCount: proto.Uint64(pb.Histogram.GetSampleCount()), - UpperBound: proto.Float64(math.Inf(1)), - Exemplar: e, - } - pb.Histogram.Bucket = append(pb.Histogram.Bucket, b) - } - } - default: - // TODO(bwplotka): Implement Gauge? - return errors.New("cannot inject exemplar into Gauge, Summary or Untyped") - } - - return nil -} - -// Exemplar is easier to use, user-facing representation of *dto.Exemplar. -type Exemplar struct { - Value float64 - Labels Labels - // Optional. - // Default value (time.Time{}) indicates its empty, which should be - // understood as time.Now() time at the moment of creation of metric. - Timestamp time.Time -} - -// NewMetricWithExemplars returns a new Metric wrapping the provided Metric with given -// exemplars. Exemplars are validated. -// -// Only last applicable exemplar is injected from the list. -// For example for Counter it means last exemplar is injected. -// For Histogram, it means last applicable exemplar for each bucket is injected. -// -// NewMetricWithExemplars works best with MustNewConstMetric and -// MustNewConstHistogram, see example. 
-func NewMetricWithExemplars(m Metric, exemplars ...Exemplar) (Metric, error) { - if len(exemplars) == 0 { - return nil, errors.New("no exemplar was passed for NewMetricWithExemplars") - } - - var ( - now = time.Now() - exs = make([]*dto.Exemplar, len(exemplars)) - err error - ) - for i, e := range exemplars { - ts := e.Timestamp - if ts == (time.Time{}) { - ts = now - } - exs[i], err = newExemplar(e.Value, ts, e.Labels) - if err != nil { - return nil, err - } - } - - return &withExemplarsMetric{Metric: m, exemplars: exs}, nil -} - -// MustNewMetricWithExemplars is a version of NewMetricWithExemplars that panics where -// NewMetricWithExemplars would have returned an error. -func MustNewMetricWithExemplars(m Metric, exemplars ...Exemplar) Metric { - ret, err := NewMetricWithExemplars(m, exemplars...) - if err != nil { - panic(err) - } - return ret -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go b/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go deleted file mode 100644 index 7c12b2108..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !js || wasm -// +build !js wasm - -package prometheus - -import "runtime" - -// getRuntimeNumThreads returns the number of open OS threads. -func getRuntimeNumThreads() float64 { - n, _ := runtime.ThreadCreateProfile(nil) - return float64(n) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go b/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go deleted file mode 100644 index 7348df01d..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build js && !wasm -// +build js,!wasm - -package prometheus - -// getRuntimeNumThreads returns the number of open OS threads. 
-func getRuntimeNumThreads() float64 { - return 1 -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/vendor/github.com/prometheus/client_golang/prometheus/observer.go deleted file mode 100644 index 03773b21f..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/observer.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -// Observer is the interface that wraps the Observe method, which is used by -// Histogram and Summary to add observations. -type Observer interface { - Observe(float64) -} - -// The ObserverFunc type is an adapter to allow the use of ordinary -// functions as Observers. If f is a function with the appropriate -// signature, ObserverFunc(f) is an Observer that calls f. -// -// This adapter is usually used in connection with the Timer type, and there are -// two general use cases: -// -// The most common one is to use a Gauge as the Observer for a Timer. -// See the "Gauge" Timer example. -// -// The more advanced use case is to create a function that dynamically decides -// which Observer to use for observing the duration. See the "Complex" Timer -// example. -type ObserverFunc func(float64) - -// Observe calls f(value). It implements Observer. -func (f ObserverFunc) Observe(value float64) { - f(value) -} - -// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`. -type ObserverVec interface { - GetMetricWith(Labels) (Observer, error) - GetMetricWithLabelValues(lvs ...string) (Observer, error) - With(Labels) Observer - WithLabelValues(...string) Observer - CurryWith(Labels) (ObserverVec, error) - MustCurryWith(Labels) ObserverVec - - Collector -} - -// ExemplarObserver is implemented by Observers that offer the option of -// observing a value together with an exemplar. Its ObserveWithExemplar method -// works like the Observe method of an Observer but also replaces the currently -// saved exemplar (if any) with a new one, created from the provided value, the -// current time as timestamp, and the provided Labels. Empty Labels will lead to -// a valid (label-less) exemplar. But if Labels is nil, the current exemplar is -// left in place. ObserveWithExemplar panics if any of the provided labels are -// invalid or if the provided labels contain more than 128 runes in total. -type ExemplarObserver interface { - ObserveWithExemplar(value float64, exemplar Labels) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go deleted file mode 100644 index 8548dd18e..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "errors" - "fmt" - "os" - "strconv" - "strings" -) - -type processCollector struct { - collectFn func(chan<- Metric) - pidFn func() (int, error) - reportErrors bool - cpuTotal *Desc - openFDs, maxFDs *Desc - vsize, maxVsize *Desc - rss *Desc - startTime *Desc -} - -// ProcessCollectorOpts defines the behavior of a process metrics collector -// created with NewProcessCollector. -type ProcessCollectorOpts struct { - // PidFn returns the PID of the process the collector collects metrics - // for. It is called upon each collection. By default, the PID of the - // current process is used, as determined on construction time by - // calling os.Getpid(). - PidFn func() (int, error) - // If non-empty, each of the collected metrics is prefixed by the - // provided string and an underscore ("_"). - Namespace string - // If true, any error encountered during collection is reported as an - // invalid metric (see NewInvalidMetric). Otherwise, errors are ignored - // and the collected metrics will be incomplete. (Possibly, no metrics - // will be collected at all.) While that's usually not desired, it is - // appropriate for the common "mix-in" of process metrics, where process - // metrics are nice to have, but failing to collect them should not - // disrupt the collection of the remaining metrics. - ReportErrors bool -} - -// NewProcessCollector is the obsolete version of collectors.NewProcessCollector. -// See there for documentation. -// -// Deprecated: Use collectors.NewProcessCollector instead. -func NewProcessCollector(opts ProcessCollectorOpts) Collector { - ns := "" - if len(opts.Namespace) > 0 { - ns = opts.Namespace + "_" - } - - c := &processCollector{ - reportErrors: opts.ReportErrors, - cpuTotal: NewDesc( - ns+"process_cpu_seconds_total", - "Total user and system CPU time spent in seconds.", - nil, nil, - ), - openFDs: NewDesc( - ns+"process_open_fds", - "Number of open file descriptors.", - nil, nil, - ), - maxFDs: NewDesc( - ns+"process_max_fds", - "Maximum number of open file descriptors.", - nil, nil, - ), - vsize: NewDesc( - ns+"process_virtual_memory_bytes", - "Virtual memory size in bytes.", - nil, nil, - ), - maxVsize: NewDesc( - ns+"process_virtual_memory_max_bytes", - "Maximum amount of virtual memory available in bytes.", - nil, nil, - ), - rss: NewDesc( - ns+"process_resident_memory_bytes", - "Resident memory size in bytes.", - nil, nil, - ), - startTime: NewDesc( - ns+"process_start_time_seconds", - "Start time of the process since unix epoch in seconds.", - nil, nil, - ), - } - - if opts.PidFn == nil { - c.pidFn = getPIDFn() - } else { - c.pidFn = opts.PidFn - } - - // Set up process metric collection if supported by the runtime. - if canCollectProcess() { - c.collectFn = c.processCollect - } else { - c.collectFn = func(ch chan<- Metric) { - c.reportError(ch, nil, errors.New("process metrics not supported on this platform")) - } - } - - return c -} - -// Describe returns all descriptions of the collector. 
-func (c *processCollector) Describe(ch chan<- *Desc) { - ch <- c.cpuTotal - ch <- c.openFDs - ch <- c.maxFDs - ch <- c.vsize - ch <- c.maxVsize - ch <- c.rss - ch <- c.startTime -} - -// Collect returns the current state of all metrics of the collector. -func (c *processCollector) Collect(ch chan<- Metric) { - c.collectFn(ch) -} - -func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) { - if !c.reportErrors { - return - } - if desc == nil { - desc = NewInvalidDesc(err) - } - ch <- NewInvalidMetric(desc, err) -} - -// NewPidFileFn returns a function that retrieves a pid from the specified file. -// It is meant to be used for the PidFn field in ProcessCollectorOpts. -func NewPidFileFn(pidFilePath string) func() (int, error) { - return func() (int, error) { - content, err := os.ReadFile(pidFilePath) - if err != nil { - return 0, fmt.Errorf("can't read pid file %q: %w", pidFilePath, err) - } - pid, err := strconv.Atoi(strings.TrimSpace(string(content))) - if err != nil { - return 0, fmt.Errorf("can't parse pid file %q: %w", pidFilePath, err) - } - - return pid, nil - } -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go deleted file mode 100644 index b1e363d6c..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build js -// +build js - -package prometheus - -func canCollectProcess() bool { - return false -} - -func (c *processCollector) processCollect(ch chan<- Metric) { - // noop on this platform - return -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go deleted file mode 100644 index c0152cdb6..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//go:build !windows && !js -// +build !windows,!js - -package prometheus - -import ( - "github.com/prometheus/procfs" -) - -func canCollectProcess() bool { - _, err := procfs.NewDefaultFS() - return err == nil -} - -func (c *processCollector) processCollect(ch chan<- Metric) { - pid, err := c.pidFn() - if err != nil { - c.reportError(ch, nil, err) - return - } - - p, err := procfs.NewProc(pid) - if err != nil { - c.reportError(ch, nil, err) - return - } - - if stat, err := p.Stat(); err == nil { - ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime()) - ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory())) - ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory())) - if startTime, err := stat.StartTime(); err == nil { - ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) - } else { - c.reportError(ch, c.startTime, err) - } - } else { - c.reportError(ch, nil, err) - } - - if fds, err := p.FileDescriptorsLen(); err == nil { - ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds)) - } else { - c.reportError(ch, c.openFDs, err) - } - - if limits, err := p.Limits(); err == nil { - ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles)) - ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace)) - } else { - c.reportError(ch, nil, err) - } -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go deleted file mode 100644 index f973398df..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package prometheus - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -func canCollectProcess() bool { - return true -} - -var ( - modpsapi = syscall.NewLazyDLL("psapi.dll") - modkernel32 = syscall.NewLazyDLL("kernel32.dll") - - procGetProcessMemoryInfo = modpsapi.NewProc("GetProcessMemoryInfo") - procGetProcessHandleCount = modkernel32.NewProc("GetProcessHandleCount") -) - -type processMemoryCounters struct { - // System interface description - // https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-process_memory_counters_ex - - // Refer to the Golang internal implementation - // https://golang.org/src/internal/syscall/windows/psapi_windows.go - _ uint32 - PageFaultCount uint32 - PeakWorkingSetSize uintptr - WorkingSetSize uintptr - QuotaPeakPagedPoolUsage uintptr - QuotaPagedPoolUsage uintptr - QuotaPeakNonPagedPoolUsage uintptr - QuotaNonPagedPoolUsage uintptr - PagefileUsage uintptr - PeakPagefileUsage uintptr - PrivateUsage uintptr -} - -func getProcessMemoryInfo(handle windows.Handle) (processMemoryCounters, error) { - mem := processMemoryCounters{} - r1, _, err := procGetProcessMemoryInfo.Call( - uintptr(handle), - uintptr(unsafe.Pointer(&mem)), - uintptr(unsafe.Sizeof(mem)), - ) - if r1 != 1 { - return mem, err - } else { - return mem, nil - } -} - -func getProcessHandleCount(handle windows.Handle) (uint32, error) { - var count uint32 - r1, _, err := procGetProcessHandleCount.Call( - uintptr(handle), - uintptr(unsafe.Pointer(&count)), - ) - if r1 != 1 { - return 0, err - } else { - return count, nil - } -} - -func (c *processCollector) processCollect(ch chan<- Metric) { - h, err := windows.GetCurrentProcess() - if err != nil { - c.reportError(ch, nil, err) - return - } - - var startTime, exitTime, kernelTime, userTime windows.Filetime - err = windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime) - if err != nil { - c.reportError(ch, nil, err) - return - } - ch <- MustNewConstMetric(c.startTime, GaugeValue, float64(startTime.Nanoseconds()/1e9)) - ch <- MustNewConstMetric(c.cpuTotal, CounterValue, fileTimeToSeconds(kernelTime)+fileTimeToSeconds(userTime)) - - mem, err := getProcessMemoryInfo(h) - if err != nil { - c.reportError(ch, nil, err) - return - } - ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(mem.PrivateUsage)) - ch <- MustNewConstMetric(c.rss, GaugeValue, float64(mem.WorkingSetSize)) - - handles, err := getProcessHandleCount(h) - if err != nil { - c.reportError(ch, nil, err) - return - } - ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(handles)) - ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process. -} - -func fileTimeToSeconds(ft windows.Filetime) float64 { - return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7 -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go deleted file mode 100644 index 9819917b8..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go +++ /dev/null @@ -1,374 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package promhttp - -import ( - "bufio" - "io" - "net" - "net/http" -) - -const ( - closeNotifier = 1 << iota - flusher - hijacker - readerFrom - pusher -) - -type delegator interface { - http.ResponseWriter - - Status() int - Written() int64 -} - -type responseWriterDelegator struct { - http.ResponseWriter - - status int - written int64 - wroteHeader bool - observeWriteHeader func(int) -} - -func (r *responseWriterDelegator) Status() int { - return r.status -} - -func (r *responseWriterDelegator) Written() int64 { - return r.written -} - -func (r *responseWriterDelegator) WriteHeader(code int) { - if r.observeWriteHeader != nil && !r.wroteHeader { - // Only call observeWriteHeader for the 1st time. It's a bug if - // WriteHeader is called more than once, but we want to protect - // against it here. Note that we still delegate the WriteHeader - // to the original ResponseWriter to not mask the bug from it. - r.observeWriteHeader(code) - } - r.status = code - r.wroteHeader = true - r.ResponseWriter.WriteHeader(code) -} - -func (r *responseWriterDelegator) Write(b []byte) (int, error) { - // If applicable, call WriteHeader here so that observeWriteHeader is - // handled appropriately. - if !r.wroteHeader { - r.WriteHeader(http.StatusOK) - } - n, err := r.ResponseWriter.Write(b) - r.written += int64(n) - return n, err -} - -type ( - closeNotifierDelegator struct{ *responseWriterDelegator } - flusherDelegator struct{ *responseWriterDelegator } - hijackerDelegator struct{ *responseWriterDelegator } - readerFromDelegator struct{ *responseWriterDelegator } - pusherDelegator struct{ *responseWriterDelegator } -) - -func (d closeNotifierDelegator) CloseNotify() <-chan bool { - //nolint:staticcheck // Ignore SA1019. http.CloseNotifier is deprecated but we keep it here to not break existing users. - return d.ResponseWriter.(http.CloseNotifier).CloseNotify() -} - -func (d flusherDelegator) Flush() { - // If applicable, call WriteHeader here so that observeWriteHeader is - // handled appropriately. - if !d.wroteHeader { - d.WriteHeader(http.StatusOK) - } - d.ResponseWriter.(http.Flusher).Flush() -} - -func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { - return d.ResponseWriter.(http.Hijacker).Hijack() -} - -func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) { - // If applicable, call WriteHeader here so that observeWriteHeader is - // handled appropriately. - if !d.wroteHeader { - d.WriteHeader(http.StatusOK) - } - n, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re) - d.written += n - return n, err -} - -func (d pusherDelegator) Push(target string, opts *http.PushOptions) error { - return d.ResponseWriter.(http.Pusher).Push(target, opts) -} - -var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32) - -func init() { - // TODO(beorn7): Code generation would help here. 
- pickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0 - return d - } - pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1 - return closeNotifierDelegator{d} - } - pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2 - return flusherDelegator{d} - } - pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3 - return struct { - *responseWriterDelegator - http.Flusher - http.CloseNotifier - }{d, flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4 - return hijackerDelegator{d} - } - pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5 - return struct { - *responseWriterDelegator - http.Hijacker - http.CloseNotifier - }{d, hijackerDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6 - return struct { - *responseWriterDelegator - http.Hijacker - http.Flusher - }{d, hijackerDelegator{d}, flusherDelegator{d}} - } - pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7 - return struct { - *responseWriterDelegator - http.Hijacker - http.Flusher - http.CloseNotifier - }{d, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8 - return readerFromDelegator{d} - } - pickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9 - return struct { - *responseWriterDelegator - io.ReaderFrom - http.CloseNotifier - }{d, readerFromDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10 - return struct { - *responseWriterDelegator - io.ReaderFrom - http.Flusher - }{d, readerFromDelegator{d}, flusherDelegator{d}} - } - pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11 - return struct { - *responseWriterDelegator - io.ReaderFrom - http.Flusher - http.CloseNotifier - }{d, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12 - return struct { - *responseWriterDelegator - io.ReaderFrom - http.Hijacker - }{d, readerFromDelegator{d}, hijackerDelegator{d}} - } - pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13 - return struct { - *responseWriterDelegator - io.ReaderFrom - http.Hijacker - http.CloseNotifier - }{d, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14 - return struct { - *responseWriterDelegator - io.ReaderFrom - http.Hijacker - http.Flusher - }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} - } - pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15 - return struct { - *responseWriterDelegator - io.ReaderFrom - http.Hijacker - http.Flusher - http.CloseNotifier - }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16 - return pusherDelegator{d} - } - pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17 
- return struct { - *responseWriterDelegator - http.Pusher - http.CloseNotifier - }{d, pusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18 - return struct { - *responseWriterDelegator - http.Pusher - http.Flusher - }{d, pusherDelegator{d}, flusherDelegator{d}} - } - pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19 - return struct { - *responseWriterDelegator - http.Pusher - http.Flusher - http.CloseNotifier - }{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20 - return struct { - *responseWriterDelegator - http.Pusher - http.Hijacker - }{d, pusherDelegator{d}, hijackerDelegator{d}} - } - pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21 - return struct { - *responseWriterDelegator - http.Pusher - http.Hijacker - http.CloseNotifier - }{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22 - return struct { - *responseWriterDelegator - http.Pusher - http.Hijacker - http.Flusher - }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} - } - pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 23 - return struct { - *responseWriterDelegator - http.Pusher - http.Hijacker - http.Flusher - http.CloseNotifier - }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - }{d, pusherDelegator{d}, readerFromDelegator{d}} - } - pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.CloseNotifier - }{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Flusher - }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}} - } - pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Flusher - http.CloseNotifier - }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Hijacker - }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}} - } - pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Hijacker - http.CloseNotifier - }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - 
http.Hijacker - http.Flusher - }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} - } - pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Hijacker - http.Flusher - http.CloseNotifier - }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } -} - -func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator { - d := &responseWriterDelegator{ - ResponseWriter: w, - observeWriteHeader: observeWriteHeaderFunc, - } - - id := 0 - //nolint:staticcheck // Ignore SA1019. http.CloseNotifier is deprecated but we keep it here to not break existing users. - if _, ok := w.(http.CloseNotifier); ok { - id += closeNotifier - } - if _, ok := w.(http.Flusher); ok { - id += flusher - } - if _, ok := w.(http.Hijacker); ok { - id += hijacker - } - if _, ok := w.(io.ReaderFrom); ok { - id += readerFrom - } - if _, ok := w.(http.Pusher); ok { - id += pusher - } - - return pickDelegator[id](d) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go deleted file mode 100644 index a4cc9810b..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go +++ /dev/null @@ -1,395 +0,0 @@ -// Copyright 2016 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package promhttp provides tooling around HTTP servers and clients. -// -// First, the package allows the creation of http.Handler instances to expose -// Prometheus metrics via HTTP. promhttp.Handler acts on the -// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a -// custom registry or anything that implements the Gatherer interface. It also -// allows the creation of handlers that act differently on errors or allow to -// log errors. -// -// Second, the package provides tooling to instrument instances of http.Handler -// via middleware. Middleware wrappers follow the naming scheme -// InstrumentHandlerX, where X describes the intended use of the middleware. -// See each function's doc comment for specific details. -// -// Finally, the package allows for an http.RoundTripper to be instrumented via -// middleware. Middleware wrappers follow the naming scheme -// InstrumentRoundTripperX, where X describes the intended use of the -// middleware. See each function's doc comment for specific details. 
-package promhttp - -import ( - "compress/gzip" - "errors" - "fmt" - "io" - "net/http" - "strings" - "sync" - "time" - - "github.com/prometheus/common/expfmt" - - "github.com/prometheus/client_golang/prometheus" -) - -const ( - contentTypeHeader = "Content-Type" - contentEncodingHeader = "Content-Encoding" - acceptEncodingHeader = "Accept-Encoding" -) - -var gzipPool = sync.Pool{ - New: func() interface{} { - return gzip.NewWriter(nil) - }, -} - -// Handler returns an http.Handler for the prometheus.DefaultGatherer, using -// default HandlerOpts, i.e. it reports the first error as an HTTP error, it has -// no error logging, and it applies compression if requested by the client. -// -// The returned http.Handler is already instrumented using the -// InstrumentMetricHandler function and the prometheus.DefaultRegisterer. If you -// create multiple http.Handlers by separate calls of the Handler function, the -// metrics used for instrumentation will be shared between them, providing -// global scrape counts. -// -// This function is meant to cover the bulk of basic use cases. If you are doing -// anything that requires more customization (including using a non-default -// Gatherer, different instrumentation, and non-default HandlerOpts), use the -// HandlerFor function. See there for details. -func Handler() http.Handler { - return InstrumentMetricHandler( - prometheus.DefaultRegisterer, HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}), - ) -} - -// HandlerFor returns an uninstrumented http.Handler for the provided -// Gatherer. The behavior of the Handler is defined by the provided -// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom -// Gatherers, with non-default HandlerOpts, and/or with custom (or no) -// instrumentation. Use the InstrumentMetricHandler function to apply the same -// kind of instrumentation as it is used by the Handler function. -func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { - return HandlerForTransactional(prometheus.ToTransactionalGatherer(reg), opts) -} - -// HandlerForTransactional is like HandlerFor, but it uses transactional gather, which -// can safely change in-place returned *dto.MetricFamily before call to `Gather` and after -// call to `done` of that `Gather`. -func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerOpts) http.Handler { - var ( - inFlightSem chan struct{} - errCnt = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "promhttp_metric_handler_errors_total", - Help: "Total number of internal errors encountered by the promhttp metric handler.", - }, - []string{"cause"}, - ) - ) - - if opts.MaxRequestsInFlight > 0 { - inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight) - } - if opts.Registry != nil { - // Initialize all possibilities that can occur below. - errCnt.WithLabelValues("gathering") - errCnt.WithLabelValues("encoding") - if err := opts.Registry.Register(errCnt); err != nil { - are := &prometheus.AlreadyRegisteredError{} - if errors.As(err, are) { - errCnt = are.ExistingCollector.(*prometheus.CounterVec) - } else { - panic(err) - } - } - } - - h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) { - if inFlightSem != nil { - select { - case inFlightSem <- struct{}{}: // All good, carry on. 
- defer func() { <-inFlightSem }() - default: - http.Error(rsp, fmt.Sprintf( - "Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight, - ), http.StatusServiceUnavailable) - return - } - } - mfs, done, err := reg.Gather() - defer done() - if err != nil { - if opts.ErrorLog != nil { - opts.ErrorLog.Println("error gathering metrics:", err) - } - errCnt.WithLabelValues("gathering").Inc() - switch opts.ErrorHandling { - case PanicOnError: - panic(err) - case ContinueOnError: - if len(mfs) == 0 { - // Still report the error if no metrics have been gathered. - httpError(rsp, err) - return - } - case HTTPErrorOnError: - httpError(rsp, err) - return - } - } - - var contentType expfmt.Format - if opts.EnableOpenMetrics { - contentType = expfmt.NegotiateIncludingOpenMetrics(req.Header) - } else { - contentType = expfmt.Negotiate(req.Header) - } - header := rsp.Header() - header.Set(contentTypeHeader, string(contentType)) - - w := io.Writer(rsp) - if !opts.DisableCompression && gzipAccepted(req.Header) { - header.Set(contentEncodingHeader, "gzip") - gz := gzipPool.Get().(*gzip.Writer) - defer gzipPool.Put(gz) - - gz.Reset(w) - defer gz.Close() - - w = gz - } - - enc := expfmt.NewEncoder(w, contentType) - - // handleError handles the error according to opts.ErrorHandling - // and returns true if we have to abort after the handling. - handleError := func(err error) bool { - if err == nil { - return false - } - if opts.ErrorLog != nil { - opts.ErrorLog.Println("error encoding and sending metric family:", err) - } - errCnt.WithLabelValues("encoding").Inc() - switch opts.ErrorHandling { - case PanicOnError: - panic(err) - case HTTPErrorOnError: - // We cannot really send an HTTP error at this - // point because we most likely have written - // something to rsp already. But at least we can - // stop sending. - return true - } - // Do nothing in all other cases, including ContinueOnError. - return false - } - - for _, mf := range mfs { - if handleError(enc.Encode(mf)) { - return - } - } - if closer, ok := enc.(expfmt.Closer); ok { - // This in particular takes care of the final "# EOF\n" line for OpenMetrics. - if handleError(closer.Close()) { - return - } - } - }) - - if opts.Timeout <= 0 { - return h - } - return http.TimeoutHandler(h, opts.Timeout, fmt.Sprintf( - "Exceeded configured timeout of %v.\n", - opts.Timeout, - )) -} - -// InstrumentMetricHandler is usually used with an http.Handler returned by the -// HandlerFor function. It instruments the provided http.Handler with two -// metrics: A counter vector "promhttp_metric_handler_requests_total" to count -// scrapes partitioned by HTTP status code, and a gauge -// "promhttp_metric_handler_requests_in_flight" to track the number of -// simultaneous scrapes. This function idempotently registers collectors for -// both metrics with the provided Registerer. It panics if the registration -// fails. The provided metrics are useful to see how many scrapes hit the -// monitored target (which could be from different Prometheus servers or other -// scrapers), and how often they overlap (which would result in more than one -// scrape in flight at the same time). Note that the scrapes-in-flight gauge -// will contain the scrape by which it is exposed, while the scrape counter will -// only get incremented after the scrape is complete (as only then the status -// code is known). For tracking scrape durations, use the -// "scrape_duration_seconds" gauge created by the Prometheus server upon each -// scrape. 
-func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) http.Handler { - cnt := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "promhttp_metric_handler_requests_total", - Help: "Total number of scrapes by HTTP status code.", - }, - []string{"code"}, - ) - // Initialize the most likely HTTP status codes. - cnt.WithLabelValues("200") - cnt.WithLabelValues("500") - cnt.WithLabelValues("503") - if err := reg.Register(cnt); err != nil { - are := &prometheus.AlreadyRegisteredError{} - if errors.As(err, are) { - cnt = are.ExistingCollector.(*prometheus.CounterVec) - } else { - panic(err) - } - } - - gge := prometheus.NewGauge(prometheus.GaugeOpts{ - Name: "promhttp_metric_handler_requests_in_flight", - Help: "Current number of scrapes being served.", - }) - if err := reg.Register(gge); err != nil { - are := &prometheus.AlreadyRegisteredError{} - if errors.As(err, are) { - gge = are.ExistingCollector.(prometheus.Gauge) - } else { - panic(err) - } - } - - return InstrumentHandlerCounter(cnt, InstrumentHandlerInFlight(gge, handler)) -} - -// HandlerErrorHandling defines how a Handler serving metrics will handle -// errors. -type HandlerErrorHandling int - -// These constants cause handlers serving metrics to behave as described if -// errors are encountered. -const ( - // Serve an HTTP status code 500 upon the first error - // encountered. Report the error message in the body. Note that HTTP - // errors cannot be served anymore once the beginning of a regular - // payload has been sent. Thus, in the (unlikely) case that encoding the - // payload into the negotiated wire format fails, serving the response - // will simply be aborted. Set an ErrorLog in HandlerOpts to detect - // those errors. - HTTPErrorOnError HandlerErrorHandling = iota - // Ignore errors and try to serve as many metrics as possible. However, - // if no metrics can be served, serve an HTTP status code 500 and the - // last error message in the body. Only use this in deliberate "best - // effort" metrics collection scenarios. In this case, it is highly - // recommended to provide other means of detecting errors: By setting an - // ErrorLog in HandlerOpts, the errors are logged. By providing a - // Registry in HandlerOpts, the exposed metrics include an error counter - // "promhttp_metric_handler_errors_total", which can be used for - // alerts. - ContinueOnError - // Panic upon the first error encountered (useful for "crash only" apps). - PanicOnError -) - -// Logger is the minimal interface HandlerOpts needs for logging. Note that -// log.Logger from the standard library implements this interface, and it is -// easy to implement by custom loggers, if they don't do so already anyway. -type Logger interface { - Println(v ...interface{}) -} - -// HandlerOpts specifies options how to serve metrics via an http.Handler. The -// zero value of HandlerOpts is a reasonable default. -type HandlerOpts struct { - // ErrorLog specifies an optional Logger for errors collecting and - // serving metrics. If nil, errors are not logged at all. Note that the - // type of a reported error is often prometheus.MultiError, which - // formats into a multi-line error string. If you want to avoid the - // latter, create a Logger implementation that detects a - // prometheus.MultiError and formats the contained errors into one line. - ErrorLog Logger - // ErrorHandling defines how errors are handled. Note that errors are - // logged regardless of the configured ErrorHandling provided ErrorLog - // is not nil. 
- ErrorHandling HandlerErrorHandling - // If Registry is not nil, it is used to register a metric - // "promhttp_metric_handler_errors_total", partitioned by "cause". A - // failed registration causes a panic. Note that this error counter is - // different from the instrumentation you get from the various - // InstrumentHandler... helpers. It counts errors that don't necessarily - // result in a non-2xx HTTP status code. There are two typical cases: - // (1) Encoding errors that only happen after streaming of the HTTP body - // has already started (and the status code 200 has been sent). This - // should only happen with custom collectors. (2) Collection errors with - // no effect on the HTTP status code because ErrorHandling is set to - // ContinueOnError. - Registry prometheus.Registerer - // If DisableCompression is true, the handler will never compress the - // response, even if requested by the client. - DisableCompression bool - // The number of concurrent HTTP requests is limited to - // MaxRequestsInFlight. Additional requests are responded to with 503 - // Service Unavailable and a suitable message in the body. If - // MaxRequestsInFlight is 0 or negative, no limit is applied. - MaxRequestsInFlight int - // If handling a request takes longer than Timeout, it is responded to - // with 503 ServiceUnavailable and a suitable Message. No timeout is - // applied if Timeout is 0 or negative. Note that with the current - // implementation, reaching the timeout simply ends the HTTP requests as - // described above (and even that only if sending of the body hasn't - // started yet), while the bulk work of gathering all the metrics keeps - // running in the background (with the eventual result to be thrown - // away). Until the implementation is improved, it is recommended to - // implement a separate timeout in potentially slow Collectors. - Timeout time.Duration - // If true, the experimental OpenMetrics encoding is added to the - // possible options during content negotiation. Note that Prometheus - // 2.5.0+ will negotiate OpenMetrics as first priority. OpenMetrics is - // the only way to transmit exemplars. However, the move to OpenMetrics - // is not completely transparent. Most notably, the values of "quantile" - // labels of Summaries and "le" labels of Histograms are formatted with - // a trailing ".0" if they would otherwise look like integer numbers - // (which changes the identity of the resulting series on the Prometheus - // server). - EnableOpenMetrics bool -} - -// gzipAccepted returns whether the client will accept gzip-encoded content. -func gzipAccepted(header http.Header) bool { - a := header.Get(acceptEncodingHeader) - parts := strings.Split(a, ",") - for _, part := range parts { - part = strings.TrimSpace(part) - if part == "gzip" || strings.HasPrefix(part, "gzip;") { - return true - } - } - return false -} - -// httpError removes any content-encoding header and then calls http.Error with -// the provided error and http.StatusInternalServerError. Error contents is -// supposed to be uncompressed plain text. Same as with a plain http.Error, this -// must not be called if the header or any payload has already been sent. 
-func httpError(rsp http.ResponseWriter, err error) { - rsp.Header().Del(contentEncodingHeader) - http.Error( - rsp, - "An error has occurred while serving metrics:\n\n"+err.Error(), - http.StatusInternalServerError, - ) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go deleted file mode 100644 index 210867816..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go +++ /dev/null @@ -1,247 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package promhttp - -import ( - "crypto/tls" - "net/http" - "net/http/httptrace" - "time" - - "github.com/prometheus/client_golang/prometheus" -) - -// The RoundTripperFunc type is an adapter to allow the use of ordinary -// functions as RoundTrippers. If f is a function with the appropriate -// signature, RountTripperFunc(f) is a RoundTripper that calls f. -type RoundTripperFunc func(req *http.Request) (*http.Response, error) - -// RoundTrip implements the RoundTripper interface. -func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) { - return rt(r) -} - -// InstrumentRoundTripperInFlight is a middleware that wraps the provided -// http.RoundTripper. It sets the provided prometheus.Gauge to the number of -// requests currently handled by the wrapped http.RoundTripper. -// -// See the example for ExampleInstrumentRoundTripperDuration for example usage. -func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc { - return func(r *http.Request) (*http.Response, error) { - gauge.Inc() - defer gauge.Dec() - return next.RoundTrip(r) - } -} - -// InstrumentRoundTripperCounter is a middleware that wraps the provided -// http.RoundTripper to observe the request result with the provided CounterVec. -// The CounterVec must have zero, one, or two non-const non-curried labels. For -// those, the only allowed label names are "code" and "method". The function -// panics otherwise. For the "method" label a predefined default label value set -// is used to filter given values. Values besides predefined values will count -// as `unknown` method.`WithExtraMethods` can be used to add more -// methods to the set. Partitioning of the CounterVec happens by HTTP status code -// and/or HTTP method if the respective instance label names are present in the -// CounterVec. For unpartitioned counting, use a CounterVec with zero labels. -// -// If the wrapped RoundTripper panics or returns a non-nil error, the Counter -// is not incremented. -// -// Use with WithExemplarFromContext to instrument the exemplars on the counter of requests. -// -// See the example for ExampleInstrumentRoundTripperDuration for example usage. 
-func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper, opts ...Option) RoundTripperFunc { - rtOpts := defaultOptions() - for _, o := range opts { - o.apply(rtOpts) - } - - code, method := checkLabels(counter) - - return func(r *http.Request) (*http.Response, error) { - resp, err := next.RoundTrip(r) - if err == nil { - addWithExemplar( - counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)), - 1, - rtOpts.getExemplarFn(r.Context()), - ) - } - return resp, err - } -} - -// InstrumentRoundTripperDuration is a middleware that wraps the provided -// http.RoundTripper to observe the request duration with the provided -// ObserverVec. The ObserverVec must have zero, one, or two non-const -// non-curried labels. For those, the only allowed label names are "code" and -// "method". The function panics otherwise. For the "method" label a predefined -// default label value set is used to filter given values. Values besides -// predefined values will count as `unknown` method. `WithExtraMethods` -// can be used to add more methods to the set. The Observe method of the Observer -// in the ObserverVec is called with the request duration in -// seconds. Partitioning happens by HTTP status code and/or HTTP method if the -// respective instance label names are present in the ObserverVec. For -// unpartitioned observations, use an ObserverVec with zero labels. Note that -// partitioning of Histograms is expensive and should be used judiciously. -// -// If the wrapped RoundTripper panics or returns a non-nil error, no values are -// reported. -// -// Use with WithExemplarFromContext to instrument the exemplars on the duration histograms. -// -// Note that this method is only guaranteed to never observe negative durations -// if used with Go1.9+. -func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper, opts ...Option) RoundTripperFunc { - rtOpts := defaultOptions() - for _, o := range opts { - o.apply(rtOpts) - } - - code, method := checkLabels(obs) - - return func(r *http.Request) (*http.Response, error) { - start := time.Now() - resp, err := next.RoundTrip(r) - if err == nil { - observeWithExemplar( - obs.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)), - time.Since(start).Seconds(), - rtOpts.getExemplarFn(r.Context()), - ) - } - return resp, err - } -} - -// InstrumentTrace is used to offer flexibility in instrumenting the available -// httptrace.ClientTrace hook functions. Each function is passed a float64 -// representing the time in seconds since the start of the http request. A user -// may choose to use separately buckets Histograms, or implement custom -// instance labels on a per function basis. -type InstrumentTrace struct { - GotConn func(float64) - PutIdleConn func(float64) - GotFirstResponseByte func(float64) - Got100Continue func(float64) - DNSStart func(float64) - DNSDone func(float64) - ConnectStart func(float64) - ConnectDone func(float64) - TLSHandshakeStart func(float64) - TLSHandshakeDone func(float64) - WroteHeaders func(float64) - Wait100Continue func(float64) - WroteRequest func(float64) -} - -// InstrumentRoundTripperTrace is a middleware that wraps the provided -// RoundTripper and reports times to hook functions provided in the -// InstrumentTrace struct. Hook functions that are not present in the provided -// InstrumentTrace struct are ignored. Times reported to the hook functions are -// time since the start of the request. 
Only with Go1.9+, those times are -// guaranteed to never be negative. (Earlier Go versions are not using a -// monotonic clock.) Note that partitioning of Histograms is expensive and -// should be used judiciously. -// -// For hook functions that receive an error as an argument, no observations are -// made in the event of a non-nil error value. -// -// See the example for ExampleInstrumentRoundTripperDuration for example usage. -func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc { - return func(r *http.Request) (*http.Response, error) { - start := time.Now() - - trace := &httptrace.ClientTrace{ - GotConn: func(_ httptrace.GotConnInfo) { - if it.GotConn != nil { - it.GotConn(time.Since(start).Seconds()) - } - }, - PutIdleConn: func(err error) { - if err != nil { - return - } - if it.PutIdleConn != nil { - it.PutIdleConn(time.Since(start).Seconds()) - } - }, - DNSStart: func(_ httptrace.DNSStartInfo) { - if it.DNSStart != nil { - it.DNSStart(time.Since(start).Seconds()) - } - }, - DNSDone: func(_ httptrace.DNSDoneInfo) { - if it.DNSDone != nil { - it.DNSDone(time.Since(start).Seconds()) - } - }, - ConnectStart: func(_, _ string) { - if it.ConnectStart != nil { - it.ConnectStart(time.Since(start).Seconds()) - } - }, - ConnectDone: func(_, _ string, err error) { - if err != nil { - return - } - if it.ConnectDone != nil { - it.ConnectDone(time.Since(start).Seconds()) - } - }, - GotFirstResponseByte: func() { - if it.GotFirstResponseByte != nil { - it.GotFirstResponseByte(time.Since(start).Seconds()) - } - }, - Got100Continue: func() { - if it.Got100Continue != nil { - it.Got100Continue(time.Since(start).Seconds()) - } - }, - TLSHandshakeStart: func() { - if it.TLSHandshakeStart != nil { - it.TLSHandshakeStart(time.Since(start).Seconds()) - } - }, - TLSHandshakeDone: func(_ tls.ConnectionState, err error) { - if err != nil { - return - } - if it.TLSHandshakeDone != nil { - it.TLSHandshakeDone(time.Since(start).Seconds()) - } - }, - WroteHeaders: func() { - if it.WroteHeaders != nil { - it.WroteHeaders(time.Since(start).Seconds()) - } - }, - Wait100Continue: func() { - if it.Wait100Continue != nil { - it.Wait100Continue(time.Since(start).Seconds()) - } - }, - WroteRequest: func(_ httptrace.WroteRequestInfo) { - if it.WroteRequest != nil { - it.WroteRequest(time.Since(start).Seconds()) - } - }, - } - r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace)) - - return next.RoundTrip(r) - } -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go deleted file mode 100644 index cca67a78a..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go +++ /dev/null @@ -1,570 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package promhttp - -import ( - "errors" - "net/http" - "strconv" - "strings" - "time" - - dto "github.com/prometheus/client_model/go" - - "github.com/prometheus/client_golang/prometheus" -) - -// magicString is used for the hacky label test in checkLabels. Remove once fixed. -const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa" - -// observeWithExemplar is a wrapper for [prometheus.ExemplarAdder.ExemplarObserver], -// which falls back to [prometheus.Observer.Observe] if no labels are provided. -func observeWithExemplar(obs prometheus.Observer, val float64, labels map[string]string) { - if labels == nil { - obs.Observe(val) - return - } - obs.(prometheus.ExemplarObserver).ObserveWithExemplar(val, labels) -} - -// addWithExemplar is a wrapper for [prometheus.ExemplarAdder.AddWithExemplar], -// which falls back to [prometheus.Counter.Add] if no labels are provided. -func addWithExemplar(obs prometheus.Counter, val float64, labels map[string]string) { - if labels == nil { - obs.Add(val) - return - } - obs.(prometheus.ExemplarAdder).AddWithExemplar(val, labels) -} - -// InstrumentHandlerInFlight is a middleware that wraps the provided -// http.Handler. It sets the provided prometheus.Gauge to the number of -// requests currently handled by the wrapped http.Handler. -// -// See the example for InstrumentHandlerDuration for example usage. -func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - g.Inc() - defer g.Dec() - next.ServeHTTP(w, r) - }) -} - -// InstrumentHandlerDuration is a middleware that wraps the provided -// http.Handler to observe the request duration with the provided ObserverVec. -// The ObserverVec must have valid metric and label names and must have zero, -// one, or two non-const non-curried labels. For those, the only allowed label -// names are "code" and "method". The function panics otherwise. For the "method" -// label a predefined default label value set is used to filter given values. -// Values besides predefined values will count as `unknown` method. -// `WithExtraMethods` can be used to add more methods to the set. The Observe -// method of the Observer in the ObserverVec is called with the request duration -// in seconds. Partitioning happens by HTTP status code and/or HTTP method if -// the respective instance label names are present in the ObserverVec. For -// unpartitioned observations, use an ObserverVec with zero labels. Note that -// partitioning of Histograms is expensive and should be used judiciously. -// -// If the wrapped Handler does not set a status code, a status code of 200 is assumed. -// -// If the wrapped Handler panics, no values are reported. -// -// Note that this method is only guaranteed to never observe negative durations -// if used with Go1.9+. 
-func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc { - hOpts := defaultOptions() - for _, o := range opts { - o.apply(hOpts) - } - - code, method := checkLabels(obs) - - if code { - return func(w http.ResponseWriter, r *http.Request) { - now := time.Now() - d := newDelegator(w, nil) - next.ServeHTTP(d, r) - - observeWithExemplar( - obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), - time.Since(now).Seconds(), - hOpts.getExemplarFn(r.Context()), - ) - } - } - - return func(w http.ResponseWriter, r *http.Request) { - now := time.Now() - next.ServeHTTP(w, r) - - observeWithExemplar( - obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)), - time.Since(now).Seconds(), - hOpts.getExemplarFn(r.Context()), - ) - } -} - -// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler -// to observe the request result with the provided CounterVec. The CounterVec -// must have valid metric and label names and must have zero, one, or two -// non-const non-curried labels. For those, the only allowed label names are -// "code" and "method". The function panics otherwise. For the "method" -// label a predefined default label value set is used to filter given values. -// Values besides predefined values will count as `unknown` method. -// `WithExtraMethods` can be used to add more methods to the set. Partitioning of the -// CounterVec happens by HTTP status code and/or HTTP method if the respective -// instance label names are present in the CounterVec. For unpartitioned -// counting, use a CounterVec with zero labels. -// -// If the wrapped Handler does not set a status code, a status code of 200 is assumed. -// -// If the wrapped Handler panics, the Counter is not incremented. -// -// See the example for InstrumentHandlerDuration for example usage. -func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler, opts ...Option) http.HandlerFunc { - hOpts := defaultOptions() - for _, o := range opts { - o.apply(hOpts) - } - - code, method := checkLabels(counter) - - if code { - return func(w http.ResponseWriter, r *http.Request) { - d := newDelegator(w, nil) - next.ServeHTTP(d, r) - - addWithExemplar( - counter.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), - 1, - hOpts.getExemplarFn(r.Context()), - ) - } - } - - return func(w http.ResponseWriter, r *http.Request) { - next.ServeHTTP(w, r) - addWithExemplar( - counter.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)), - 1, - hOpts.getExemplarFn(r.Context()), - ) - } -} - -// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided -// http.Handler to observe with the provided ObserverVec the request duration -// until the response headers are written. The ObserverVec must have valid -// metric and label names and must have zero, one, or two non-const non-curried -// labels. For those, the only allowed label names are "code" and "method". The -// function panics otherwise. For the "method" label a predefined default label -// value set is used to filter given values. Values besides predefined values -// will count as `unknown` method.`WithExtraMethods` can be used to add more -// methods to the set. The Observe method of the Observer in the -// ObserverVec is called with the request duration in seconds. Partitioning -// happens by HTTP status code and/or HTTP method if the respective instance -// label names are present in the ObserverVec. 
For unpartitioned observations, -// use an ObserverVec with zero labels. Note that partitioning of Histograms is -// expensive and should be used judiciously. -// -// If the wrapped Handler panics before calling WriteHeader, no value is -// reported. -// -// Note that this method is only guaranteed to never observe negative durations -// if used with Go1.9+. -// -// See the example for InstrumentHandlerDuration for example usage. -func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc { - hOpts := defaultOptions() - for _, o := range opts { - o.apply(hOpts) - } - - code, method := checkLabels(obs) - - return func(w http.ResponseWriter, r *http.Request) { - now := time.Now() - d := newDelegator(w, func(status int) { - observeWithExemplar( - obs.With(labels(code, method, r.Method, status, hOpts.extraMethods...)), - time.Since(now).Seconds(), - hOpts.getExemplarFn(r.Context()), - ) - }) - next.ServeHTTP(d, r) - } -} - -// InstrumentHandlerRequestSize is a middleware that wraps the provided -// http.Handler to observe the request size with the provided ObserverVec. The -// ObserverVec must have valid metric and label names and must have zero, one, -// or two non-const non-curried labels. For those, the only allowed label names -// are "code" and "method". The function panics otherwise. For the "method" -// label a predefined default label value set is used to filter given values. -// Values besides predefined values will count as `unknown` method. -// `WithExtraMethods` can be used to add more methods to the set. The Observe -// method of the Observer in the ObserverVec is called with the request size in -// bytes. Partitioning happens by HTTP status code and/or HTTP method if the -// respective instance label names are present in the ObserverVec. For -// unpartitioned observations, use an ObserverVec with zero labels. Note that -// partitioning of Histograms is expensive and should be used judiciously. -// -// If the wrapped Handler does not set a status code, a status code of 200 is assumed. -// -// If the wrapped Handler panics, no values are reported. -// -// See the example for InstrumentHandlerDuration for example usage. -func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc { - hOpts := defaultOptions() - for _, o := range opts { - o.apply(hOpts) - } - - code, method := checkLabels(obs) - if code { - return func(w http.ResponseWriter, r *http.Request) { - d := newDelegator(w, nil) - next.ServeHTTP(d, r) - size := computeApproximateRequestSize(r) - observeWithExemplar( - obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), - float64(size), - hOpts.getExemplarFn(r.Context()), - ) - } - } - - return func(w http.ResponseWriter, r *http.Request) { - next.ServeHTTP(w, r) - size := computeApproximateRequestSize(r) - observeWithExemplar( - obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)), - float64(size), - hOpts.getExemplarFn(r.Context()), - ) - } -} - -// InstrumentHandlerResponseSize is a middleware that wraps the provided -// http.Handler to observe the response size with the provided ObserverVec. The -// ObserverVec must have valid metric and label names and must have zero, one, -// or two non-const non-curried labels. For those, the only allowed label names -// are "code" and "method". The function panics otherwise. For the "method" -// label a predefined default label value set is used to filter given values. 
-// Values besides predefined values will count as `unknown` method. -// `WithExtraMethods` can be used to add more methods to the set. The Observe -// method of the Observer in the ObserverVec is called with the response size in -// bytes. Partitioning happens by HTTP status code and/or HTTP method if the -// respective instance label names are present in the ObserverVec. For -// unpartitioned observations, use an ObserverVec with zero labels. Note that -// partitioning of Histograms is expensive and should be used judiciously. -// -// If the wrapped Handler does not set a status code, a status code of 200 is assumed. -// -// If the wrapped Handler panics, no values are reported. -// -// See the example for InstrumentHandlerDuration for example usage. -func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.Handler { - hOpts := defaultOptions() - for _, o := range opts { - o.apply(hOpts) - } - - code, method := checkLabels(obs) - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - d := newDelegator(w, nil) - next.ServeHTTP(d, r) - observeWithExemplar( - obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)), - float64(d.Written()), - hOpts.getExemplarFn(r.Context()), - ) - }) -} - -// checkLabels returns whether the provided Collector has a non-const, -// non-curried label named "code" and/or "method". It panics if the provided -// Collector does not have a Desc or has more than one Desc or its Desc is -// invalid. It also panics if the Collector has any non-const, non-curried -// labels that are not named "code" or "method". -func checkLabels(c prometheus.Collector) (code, method bool) { - // TODO(beorn7): Remove this hacky way to check for instance labels - // once Descriptors can have their dimensionality queried. - var ( - desc *prometheus.Desc - m prometheus.Metric - pm dto.Metric - lvs []string - ) - - // Get the Desc from the Collector. - descc := make(chan *prometheus.Desc, 1) - c.Describe(descc) - - select { - case desc = <-descc: - default: - panic("no description provided by collector") - } - select { - case <-descc: - panic("more than one description provided by collector") - default: - } - - close(descc) - - // Make sure the Collector has a valid Desc by registering it with a - // temporary registry. - prometheus.NewRegistry().MustRegister(c) - - // Create a ConstMetric with the Desc. Since we don't know how many - // variable labels there are, try for as long as it needs. - for err := errors.New("dummy"); err != nil; lvs = append(lvs, magicString) { - m, err = prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, lvs...) - } - - // Write out the metric into a proto message and look at the labels. - // If the value is not the magicString, it is a constLabel, which doesn't interest us. - // If the label is curried, it doesn't interest us. - // In all other cases, only "code" or "method" is allowed. - if err := m.Write(&pm); err != nil { - panic("error checking metric for labels") - } - for _, label := range pm.Label { - name, value := label.GetName(), label.GetValue() - if value != magicString || isLabelCurried(c, name) { - continue - } - switch name { - case "code": - code = true - case "method": - method = true - default: - panic("metric partitioned with non-supported labels") - } - } - return -} - -func isLabelCurried(c prometheus.Collector, label string) bool { - // This is even hackier than the label test above. - // We essentially try to curry again and see if it works. 
- // But for that, we need to type-convert to the two - // types we use here, ObserverVec or *CounterVec. - switch v := c.(type) { - case *prometheus.CounterVec: - if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil { - return false - } - case prometheus.ObserverVec: - if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil { - return false - } - default: - panic("unsupported metric vec type") - } - return true -} - -// emptyLabels is a one-time allocation for non-partitioned metrics to avoid -// unnecessary allocations on each request. -var emptyLabels = prometheus.Labels{} - -func labels(code, method bool, reqMethod string, status int, extraMethods ...string) prometheus.Labels { - if !(code || method) { - return emptyLabels - } - labels := prometheus.Labels{} - - if code { - labels["code"] = sanitizeCode(status) - } - if method { - labels["method"] = sanitizeMethod(reqMethod, extraMethods...) - } - - return labels -} - -func computeApproximateRequestSize(r *http.Request) int { - s := 0 - if r.URL != nil { - s += len(r.URL.String()) - } - - s += len(r.Method) - s += len(r.Proto) - for name, values := range r.Header { - s += len(name) - for _, value := range values { - s += len(value) - } - } - s += len(r.Host) - - // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. - - if r.ContentLength != -1 { - s += int(r.ContentLength) - } - return s -} - -// If the wrapped http.Handler has a known method, it will be sanitized and returned. -// Otherwise, "unknown" will be returned. The known method list can be extended -// as needed by using extraMethods parameter. -func sanitizeMethod(m string, extraMethods ...string) string { - // See https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods for - // the methods chosen as default. - switch m { - case "GET", "get": - return "get" - case "PUT", "put": - return "put" - case "HEAD", "head": - return "head" - case "POST", "post": - return "post" - case "DELETE", "delete": - return "delete" - case "CONNECT", "connect": - return "connect" - case "OPTIONS", "options": - return "options" - case "NOTIFY", "notify": - return "notify" - case "TRACE", "trace": - return "trace" - case "PATCH", "patch": - return "patch" - default: - for _, method := range extraMethods { - if strings.EqualFold(m, method) { - return strings.ToLower(m) - } - } - return "unknown" - } -} - -// If the wrapped http.Handler has not set a status code, i.e. the value is -// currently 0, sanitizeCode will return 200, for consistency with behavior in -// the stdlib. 
-func sanitizeCode(s int) string { - // See for accepted codes https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml - switch s { - case 100: - return "100" - case 101: - return "101" - - case 200, 0: - return "200" - case 201: - return "201" - case 202: - return "202" - case 203: - return "203" - case 204: - return "204" - case 205: - return "205" - case 206: - return "206" - - case 300: - return "300" - case 301: - return "301" - case 302: - return "302" - case 304: - return "304" - case 305: - return "305" - case 307: - return "307" - - case 400: - return "400" - case 401: - return "401" - case 402: - return "402" - case 403: - return "403" - case 404: - return "404" - case 405: - return "405" - case 406: - return "406" - case 407: - return "407" - case 408: - return "408" - case 409: - return "409" - case 410: - return "410" - case 411: - return "411" - case 412: - return "412" - case 413: - return "413" - case 414: - return "414" - case 415: - return "415" - case 416: - return "416" - case 417: - return "417" - case 418: - return "418" - - case 500: - return "500" - case 501: - return "501" - case 502: - return "502" - case 503: - return "503" - case 504: - return "504" - case 505: - return "505" - - case 428: - return "428" - case 429: - return "429" - case 431: - return "431" - case 511: - return "511" - - default: - if s >= 100 && s <= 599 { - return strconv.Itoa(s) - } - return "unknown" - } -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go deleted file mode 100644 index c590d912c..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2022 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package promhttp - -import ( - "context" - - "github.com/prometheus/client_golang/prometheus" -) - -// Option are used to configure both handler (middleware) or round tripper. -type Option interface { - apply(*options) -} - -// options store options for both a handler or round tripper. -type options struct { - extraMethods []string - getExemplarFn func(requestCtx context.Context) prometheus.Labels -} - -func defaultOptions() *options { - return &options{getExemplarFn: func(ctx context.Context) prometheus.Labels { return nil }} -} - -type optionApplyFunc func(*options) - -func (o optionApplyFunc) apply(opt *options) { o(opt) } - -// WithExtraMethods adds additional HTTP methods to the list of allowed methods. -// See https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods for the default list. -// -// See the example for ExampleInstrumentHandlerWithExtraMethods for example usage. -func WithExtraMethods(methods ...string) Option { - return optionApplyFunc(func(o *options) { - o.extraMethods = methods - }) -} - -// WithExemplarFromContext adds allows to put a hook to all counter and histogram metrics. 
-// If the hook function returns non-nil labels, exemplars will be added for that request, otherwise metric -// will get instrumented without exemplar. -func WithExemplarFromContext(getExemplarFn func(requestCtx context.Context) prometheus.Labels) Option { - return optionApplyFunc(func(o *options) { - o.getExemplarFn = getExemplarFn - }) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go deleted file mode 100644 index 09e34d307..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ /dev/null @@ -1,1072 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "bytes" - "errors" - "fmt" - "os" - "path/filepath" - "runtime" - "sort" - "strings" - "sync" - "unicode/utf8" - - "github.com/cespare/xxhash/v2" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - "github.com/prometheus/common/expfmt" - - dto "github.com/prometheus/client_model/go" - - "github.com/prometheus/client_golang/prometheus/internal" -) - -const ( - // Capacity for the channel to collect metrics and descriptors. - capMetricChan = 1000 - capDescChan = 10 -) - -// DefaultRegisterer and DefaultGatherer are the implementations of the -// Registerer and Gatherer interface a number of convenience functions in this -// package act on. Initially, both variables point to the same Registry, which -// has a process collector (currently on Linux only, see NewProcessCollector) -// and a Go collector (see NewGoCollector, in particular the note about -// stop-the-world implication with Go versions older than 1.9) already -// registered. This approach to keep default instances as global state mirrors -// the approach of other packages in the Go standard library. Note that there -// are caveats. Change the variables with caution and only if you understand the -// consequences. Users who want to avoid global state altogether should not use -// the convenience functions and act on custom instances instead. -var ( - defaultRegistry = NewRegistry() - DefaultRegisterer Registerer = defaultRegistry - DefaultGatherer Gatherer = defaultRegistry -) - -func init() { - MustRegister(NewProcessCollector(ProcessCollectorOpts{})) - MustRegister(NewGoCollector()) -} - -// NewRegistry creates a new vanilla Registry without any Collectors -// pre-registered. -func NewRegistry() *Registry { - return &Registry{ - collectorsByID: map[uint64]Collector{}, - descIDs: map[uint64]struct{}{}, - dimHashesByName: map[string]uint64{}, - } -} - -// NewPedanticRegistry returns a registry that checks during collection if each -// collected Metric is consistent with its reported Desc, and if the Desc has -// actually been registered with the registry. Unchecked Collectors (those whose -// Describe method does not yield any descriptors) are excluded from the check. 
-// -// Usually, a Registry will be happy as long as the union of all collected -// Metrics is consistent and valid even if some metrics are not consistent with -// their own Desc or a Desc provided by their registered Collector. Well-behaved -// Collectors and Metrics will only provide consistent Descs. This Registry is -// useful to test the implementation of Collectors and Metrics. -func NewPedanticRegistry() *Registry { - r := NewRegistry() - r.pedanticChecksEnabled = true - return r -} - -// Registerer is the interface for the part of a registry in charge of -// registering and unregistering. Users of custom registries should use -// Registerer as type for registration purposes (rather than the Registry type -// directly). In that way, they are free to use custom Registerer implementation -// (e.g. for testing purposes). -type Registerer interface { - // Register registers a new Collector to be included in metrics - // collection. It returns an error if the descriptors provided by the - // Collector are invalid or if they — in combination with descriptors of - // already registered Collectors — do not fulfill the consistency and - // uniqueness criteria described in the documentation of metric.Desc. - // - // If the provided Collector is equal to a Collector already registered - // (which includes the case of re-registering the same Collector), the - // returned error is an instance of AlreadyRegisteredError, which - // contains the previously registered Collector. - // - // A Collector whose Describe method does not yield any Desc is treated - // as unchecked. Registration will always succeed. No check for - // re-registering (see previous paragraph) is performed. Thus, the - // caller is responsible for not double-registering the same unchecked - // Collector, and for providing a Collector that will not cause - // inconsistent metrics on collection. (This would lead to scrape - // errors.) - Register(Collector) error - // MustRegister works like Register but registers any number of - // Collectors and panics upon the first registration that causes an - // error. - MustRegister(...Collector) - // Unregister unregisters the Collector that equals the Collector passed - // in as an argument. (Two Collectors are considered equal if their - // Describe method yields the same set of descriptors.) The function - // returns whether a Collector was unregistered. Note that an unchecked - // Collector cannot be unregistered (as its Describe method does not - // yield any descriptor). - // - // Note that even after unregistering, it will not be possible to - // register a new Collector that is inconsistent with the unregistered - // Collector, e.g. a Collector collecting metrics with the same name but - // a different help string. The rationale here is that the same registry - // instance must only collect consistent metrics throughout its - // lifetime. - Unregister(Collector) bool -} - -// Gatherer is the interface for the part of a registry in charge of gathering -// the collected metrics into a number of MetricFamilies. The Gatherer interface -// comes with the same general implication as described for the Registerer -// interface. -type Gatherer interface { - // Gather calls the Collect method of the registered Collectors and then - // gathers the collected metrics into a lexicographically sorted slice - // of uniquely named MetricFamily protobufs. Gather ensures that the - // returned slice is valid and self-consistent so that it can be used - // for valid exposition. 
As an exception to the strict consistency - // requirements described for metric.Desc, Gather will tolerate - // different sets of label names for metrics of the same metric family. - // - // Even if an error occurs, Gather attempts to gather as many metrics as - // possible. Hence, if a non-nil error is returned, the returned - // MetricFamily slice could be nil (in case of a fatal error that - // prevented any meaningful metric collection) or contain a number of - // MetricFamily protobufs, some of which might be incomplete, and some - // might be missing altogether. The returned error (which might be a - // MultiError) explains the details. Note that this is mostly useful for - // debugging purposes. If the gathered protobufs are to be used for - // exposition in actual monitoring, it is almost always better to not - // expose an incomplete result and instead disregard the returned - // MetricFamily protobufs in case the returned error is non-nil. - Gather() ([]*dto.MetricFamily, error) -} - -// Register registers the provided Collector with the DefaultRegisterer. -// -// Register is a shortcut for DefaultRegisterer.Register(c). See there for more -// details. -func Register(c Collector) error { - return DefaultRegisterer.Register(c) -} - -// MustRegister registers the provided Collectors with the DefaultRegisterer and -// panics if any error occurs. -// -// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See -// there for more details. -func MustRegister(cs ...Collector) { - DefaultRegisterer.MustRegister(cs...) -} - -// Unregister removes the registration of the provided Collector from the -// DefaultRegisterer. -// -// Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for -// more details. -func Unregister(c Collector) bool { - return DefaultRegisterer.Unregister(c) -} - -// GathererFunc turns a function into a Gatherer. -type GathererFunc func() ([]*dto.MetricFamily, error) - -// Gather implements Gatherer. -func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) { - return gf() -} - -// AlreadyRegisteredError is returned by the Register method if the Collector to -// be registered has already been registered before, or a different Collector -// that collects the same metrics has been registered before. Registration fails -// in that case, but you can detect from the kind of error what has -// happened. The error contains fields for the existing Collector and the -// (rejected) new Collector that equals the existing one. This can be used to -// find out if an equal Collector has been registered before and switch over to -// using the old one, as demonstrated in the example. -type AlreadyRegisteredError struct { - ExistingCollector, NewCollector Collector -} - -func (err AlreadyRegisteredError) Error() string { - return "duplicate metrics collector registration attempted" -} - -// MultiError is a slice of errors implementing the error interface. It is used -// by a Gatherer to report multiple errors during MetricFamily gathering. -type MultiError []error - -// Error formats the contained errors as a bullet point list, preceded by the -// total number of errors. Note that this results in a multi-line string. -func (errs MultiError) Error() string { - if len(errs) == 0 { - return "" - } - buf := &bytes.Buffer{} - fmt.Fprintf(buf, "%d error(s) occurred:", len(errs)) - for _, err := range errs { - fmt.Fprintf(buf, "\n* %s", err) - } - return buf.String() -} - -// Append appends the provided error if it is not nil. 
-func (errs *MultiError) Append(err error) { - if err != nil { - *errs = append(*errs, err) - } -} - -// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only -// contained error as error if len(errs is 1). In all other cases, it returns -// the MultiError directly. This is helpful for returning a MultiError in a way -// that only uses the MultiError if needed. -func (errs MultiError) MaybeUnwrap() error { - switch len(errs) { - case 0: - return nil - case 1: - return errs[0] - default: - return errs - } -} - -// Registry registers Prometheus collectors, collects their metrics, and gathers -// them into MetricFamilies for exposition. It implements Registerer, Gatherer, -// and Collector. The zero value is not usable. Create instances with -// NewRegistry or NewPedanticRegistry. -// -// Registry implements Collector to allow it to be used for creating groups of -// metrics. See the Grouping example for how this can be done. -type Registry struct { - mtx sync.RWMutex - collectorsByID map[uint64]Collector // ID is a hash of the descIDs. - descIDs map[uint64]struct{} - dimHashesByName map[string]uint64 - uncheckedCollectors []Collector - pedanticChecksEnabled bool -} - -// Register implements Registerer. -func (r *Registry) Register(c Collector) error { - var ( - descChan = make(chan *Desc, capDescChan) - newDescIDs = map[uint64]struct{}{} - newDimHashesByName = map[string]uint64{} - collectorID uint64 // All desc IDs XOR'd together. - duplicateDescErr error - ) - go func() { - c.Describe(descChan) - close(descChan) - }() - r.mtx.Lock() - defer func() { - // Drain channel in case of premature return to not leak a goroutine. - for range descChan { - } - r.mtx.Unlock() - }() - // Conduct various tests... - for desc := range descChan { - - // Is the descriptor valid at all? - if desc.err != nil { - return fmt.Errorf("descriptor %s is invalid: %w", desc, desc.err) - } - - // Is the descID unique? - // (In other words: Is the fqName + constLabel combination unique?) - if _, exists := r.descIDs[desc.id]; exists { - duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc) - } - // If it is not a duplicate desc in this collector, XOR it to - // the collectorID. (We allow duplicate descs within the same - // collector, but their existence must be a no-op.) - if _, exists := newDescIDs[desc.id]; !exists { - newDescIDs[desc.id] = struct{}{} - collectorID ^= desc.id - } - - // Are all the label names and the help string consistent with - // previous descriptors of the same name? - // First check existing descriptors... - if dimHash, exists := r.dimHashesByName[desc.fqName]; exists { - if dimHash != desc.dimHash { - return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc) - } - } else { - // ...then check the new descriptors already seen. - if dimHash, exists := newDimHashesByName[desc.fqName]; exists { - if dimHash != desc.dimHash { - return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) - } - } else { - newDimHashesByName[desc.fqName] = desc.dimHash - } - } - } - // A Collector yielding no Desc at all is considered unchecked. 
- if len(newDescIDs) == 0 { - r.uncheckedCollectors = append(r.uncheckedCollectors, c) - return nil - } - if existing, exists := r.collectorsByID[collectorID]; exists { - switch e := existing.(type) { - case *wrappingCollector: - return AlreadyRegisteredError{ - ExistingCollector: e.unwrapRecursively(), - NewCollector: c, - } - default: - return AlreadyRegisteredError{ - ExistingCollector: e, - NewCollector: c, - } - } - } - // If the collectorID is new, but at least one of the descs existed - // before, we are in trouble. - if duplicateDescErr != nil { - return duplicateDescErr - } - - // Only after all tests have passed, actually register. - r.collectorsByID[collectorID] = c - for hash := range newDescIDs { - r.descIDs[hash] = struct{}{} - } - for name, dimHash := range newDimHashesByName { - r.dimHashesByName[name] = dimHash - } - return nil -} - -// Unregister implements Registerer. -func (r *Registry) Unregister(c Collector) bool { - var ( - descChan = make(chan *Desc, capDescChan) - descIDs = map[uint64]struct{}{} - collectorID uint64 // All desc IDs XOR'd together. - ) - go func() { - c.Describe(descChan) - close(descChan) - }() - for desc := range descChan { - if _, exists := descIDs[desc.id]; !exists { - collectorID ^= desc.id - descIDs[desc.id] = struct{}{} - } - } - - r.mtx.RLock() - if _, exists := r.collectorsByID[collectorID]; !exists { - r.mtx.RUnlock() - return false - } - r.mtx.RUnlock() - - r.mtx.Lock() - defer r.mtx.Unlock() - - delete(r.collectorsByID, collectorID) - for id := range descIDs { - delete(r.descIDs, id) - } - // dimHashesByName is left untouched as those must be consistent - // throughout the lifetime of a program. - return true -} - -// MustRegister implements Registerer. -func (r *Registry) MustRegister(cs ...Collector) { - for _, c := range cs { - if err := r.Register(c); err != nil { - panic(err) - } - } -} - -// Gather implements Gatherer. -func (r *Registry) Gather() ([]*dto.MetricFamily, error) { - r.mtx.RLock() - - if len(r.collectorsByID) == 0 && len(r.uncheckedCollectors) == 0 { - // Fast path. - r.mtx.RUnlock() - return nil, nil - } - - var ( - checkedMetricChan = make(chan Metric, capMetricChan) - uncheckedMetricChan = make(chan Metric, capMetricChan) - metricHashes = map[uint64]struct{}{} - wg sync.WaitGroup - errs MultiError // The collected errors to return in the end. - registeredDescIDs map[uint64]struct{} // Only used for pedantic checks - ) - - goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors) - metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) - checkedCollectors := make(chan Collector, len(r.collectorsByID)) - uncheckedCollectors := make(chan Collector, len(r.uncheckedCollectors)) - for _, collector := range r.collectorsByID { - checkedCollectors <- collector - } - for _, collector := range r.uncheckedCollectors { - uncheckedCollectors <- collector - } - // In case pedantic checks are enabled, we have to copy the map before - // giving up the RLock. - if r.pedanticChecksEnabled { - registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs)) - for id := range r.descIDs { - registeredDescIDs[id] = struct{}{} - } - } - r.mtx.RUnlock() - - wg.Add(goroutineBudget) - - collectWorker := func() { - for { - select { - case collector := <-checkedCollectors: - collector.Collect(checkedMetricChan) - case collector := <-uncheckedCollectors: - collector.Collect(uncheckedMetricChan) - default: - return - } - wg.Done() - } - } - - // Start the first worker now to make sure at least one is running. 
- go collectWorker() - goroutineBudget-- - - // Close checkedMetricChan and uncheckedMetricChan once all collectors - // are collected. - go func() { - wg.Wait() - close(checkedMetricChan) - close(uncheckedMetricChan) - }() - - // Drain checkedMetricChan and uncheckedMetricChan in case of premature return. - defer func() { - if checkedMetricChan != nil { - for range checkedMetricChan { - } - } - if uncheckedMetricChan != nil { - for range uncheckedMetricChan { - } - } - }() - - // Copy the channel references so we can nil them out later to remove - // them from the select statements below. - cmc := checkedMetricChan - umc := uncheckedMetricChan - - for { - select { - case metric, ok := <-cmc: - if !ok { - cmc = nil - break - } - errs.Append(processMetric( - metric, metricFamiliesByName, - metricHashes, - registeredDescIDs, - )) - case metric, ok := <-umc: - if !ok { - umc = nil - break - } - errs.Append(processMetric( - metric, metricFamiliesByName, - metricHashes, - nil, - )) - default: - if goroutineBudget <= 0 || len(checkedCollectors)+len(uncheckedCollectors) == 0 { - // All collectors are already being worked on or - // we have already as many goroutines started as - // there are collectors. Do the same as above, - // just without the default. - select { - case metric, ok := <-cmc: - if !ok { - cmc = nil - break - } - errs.Append(processMetric( - metric, metricFamiliesByName, - metricHashes, - registeredDescIDs, - )) - case metric, ok := <-umc: - if !ok { - umc = nil - break - } - errs.Append(processMetric( - metric, metricFamiliesByName, - metricHashes, - nil, - )) - } - break - } - // Start more workers. - go collectWorker() - goroutineBudget-- - runtime.Gosched() - } - // Once both checkedMetricChan and uncheckdMetricChan are closed - // and drained, the contraption above will nil out cmc and umc, - // and then we can leave the collect loop here. - if cmc == nil && umc == nil { - break - } - } - return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() -} - -// Describe implements Collector. -func (r *Registry) Describe(ch chan<- *Desc) { - r.mtx.RLock() - defer r.mtx.RUnlock() - - // Only report the checked Collectors; unchecked collectors don't report any - // Desc. - for _, c := range r.collectorsByID { - c.Describe(ch) - } -} - -// Collect implements Collector. -func (r *Registry) Collect(ch chan<- Metric) { - r.mtx.RLock() - defer r.mtx.RUnlock() - - for _, c := range r.collectorsByID { - c.Collect(ch) - } - for _, c := range r.uncheckedCollectors { - c.Collect(ch) - } -} - -// WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the -// Prometheus text format, and writes it to a temporary file. Upon success, the -// temporary file is renamed to the provided filename. -// -// This is intended for use with the textfile collector of the node exporter. -// Note that the node exporter expects the filename to be suffixed with ".prom". 
-func WriteToTextfile(filename string, g Gatherer) error { - tmp, err := os.CreateTemp(filepath.Dir(filename), filepath.Base(filename)) - if err != nil { - return err - } - defer os.Remove(tmp.Name()) - - mfs, err := g.Gather() - if err != nil { - return err - } - for _, mf := range mfs { - if _, err := expfmt.MetricFamilyToText(tmp, mf); err != nil { - return err - } - } - if err := tmp.Close(); err != nil { - return err - } - - if err := os.Chmod(tmp.Name(), 0o644); err != nil { - return err - } - return os.Rename(tmp.Name(), filename) -} - -// processMetric is an internal helper method only used by the Gather method. -func processMetric( - metric Metric, - metricFamiliesByName map[string]*dto.MetricFamily, - metricHashes map[uint64]struct{}, - registeredDescIDs map[uint64]struct{}, -) error { - desc := metric.Desc() - // Wrapped metrics collected by an unchecked Collector can have an - // invalid Desc. - if desc.err != nil { - return desc.err - } - dtoMetric := &dto.Metric{} - if err := metric.Write(dtoMetric); err != nil { - return fmt.Errorf("error collecting metric %v: %w", desc, err) - } - metricFamily, ok := metricFamiliesByName[desc.fqName] - if ok { // Existing name. - if metricFamily.GetHelp() != desc.help { - return fmt.Errorf( - "collected metric %s %s has help %q but should have %q", - desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(), - ) - } - // TODO(beorn7): Simplify switch once Desc has type. - switch metricFamily.GetType() { - case dto.MetricType_COUNTER: - if dtoMetric.Counter == nil { - return fmt.Errorf( - "collected metric %s %s should be a Counter", - desc.fqName, dtoMetric, - ) - } - case dto.MetricType_GAUGE: - if dtoMetric.Gauge == nil { - return fmt.Errorf( - "collected metric %s %s should be a Gauge", - desc.fqName, dtoMetric, - ) - } - case dto.MetricType_SUMMARY: - if dtoMetric.Summary == nil { - return fmt.Errorf( - "collected metric %s %s should be a Summary", - desc.fqName, dtoMetric, - ) - } - case dto.MetricType_UNTYPED: - if dtoMetric.Untyped == nil { - return fmt.Errorf( - "collected metric %s %s should be Untyped", - desc.fqName, dtoMetric, - ) - } - case dto.MetricType_HISTOGRAM: - if dtoMetric.Histogram == nil { - return fmt.Errorf( - "collected metric %s %s should be a Histogram", - desc.fqName, dtoMetric, - ) - } - default: - panic("encountered MetricFamily with invalid type") - } - } else { // New name. - metricFamily = &dto.MetricFamily{} - metricFamily.Name = proto.String(desc.fqName) - metricFamily.Help = proto.String(desc.help) - // TODO(beorn7): Simplify switch once Desc has type. - switch { - case dtoMetric.Gauge != nil: - metricFamily.Type = dto.MetricType_GAUGE.Enum() - case dtoMetric.Counter != nil: - metricFamily.Type = dto.MetricType_COUNTER.Enum() - case dtoMetric.Summary != nil: - metricFamily.Type = dto.MetricType_SUMMARY.Enum() - case dtoMetric.Untyped != nil: - metricFamily.Type = dto.MetricType_UNTYPED.Enum() - case dtoMetric.Histogram != nil: - metricFamily.Type = dto.MetricType_HISTOGRAM.Enum() - default: - return fmt.Errorf("empty metric collected: %s", dtoMetric) - } - if err := checkSuffixCollisions(metricFamily, metricFamiliesByName); err != nil { - return err - } - metricFamiliesByName[desc.fqName] = metricFamily - } - if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes); err != nil { - return err - } - if registeredDescIDs != nil { - // Is the desc registered at all? 
- if _, exist := registeredDescIDs[desc.id]; !exist { - return fmt.Errorf( - "collected metric %s %s with unregistered descriptor %s", - metricFamily.GetName(), dtoMetric, desc, - ) - } - if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil { - return err - } - } - metricFamily.Metric = append(metricFamily.Metric, dtoMetric) - return nil -} - -// Gatherers is a slice of Gatherer instances that implements the Gatherer -// interface itself. Its Gather method calls Gather on all Gatherers in the -// slice in order and returns the merged results. Errors returned from the -// Gather calls are all returned in a flattened MultiError. Duplicate and -// inconsistent Metrics are skipped (first occurrence in slice order wins) and -// reported in the returned error. -// -// Gatherers can be used to merge the Gather results from multiple -// Registries. It also provides a way to directly inject existing MetricFamily -// protobufs into the gathering by creating a custom Gatherer with a Gather -// method that simply returns the existing MetricFamily protobufs. Note that no -// registration is involved (in contrast to Collector registration), so -// obviously registration-time checks cannot happen. Any inconsistencies between -// the gathered MetricFamilies are reported as errors by the Gather method, and -// inconsistent Metrics are dropped. Invalid parts of the MetricFamilies -// (e.g. syntactically invalid metric or label names) will go undetected. -type Gatherers []Gatherer - -// Gather implements Gatherer. -func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { - var ( - metricFamiliesByName = map[string]*dto.MetricFamily{} - metricHashes = map[uint64]struct{}{} - errs MultiError // The collected errors to return in the end. - ) - - for i, g := range gs { - mfs, err := g.Gather() - if err != nil { - multiErr := MultiError{} - if errors.As(err, &multiErr) { - for _, err := range multiErr { - errs = append(errs, fmt.Errorf("[from Gatherer #%d] %w", i+1, err)) - } - } else { - errs = append(errs, fmt.Errorf("[from Gatherer #%d] %w", i+1, err)) - } - } - for _, mf := range mfs { - existingMF, exists := metricFamiliesByName[mf.GetName()] - if exists { - if existingMF.GetHelp() != mf.GetHelp() { - errs = append(errs, fmt.Errorf( - "gathered metric family %s has help %q but should have %q", - mf.GetName(), mf.GetHelp(), existingMF.GetHelp(), - )) - continue - } - if existingMF.GetType() != mf.GetType() { - errs = append(errs, fmt.Errorf( - "gathered metric family %s has type %s but should have %s", - mf.GetName(), mf.GetType(), existingMF.GetType(), - )) - continue - } - } else { - existingMF = &dto.MetricFamily{} - existingMF.Name = mf.Name - existingMF.Help = mf.Help - existingMF.Type = mf.Type - if err := checkSuffixCollisions(existingMF, metricFamiliesByName); err != nil { - errs = append(errs, err) - continue - } - metricFamiliesByName[mf.GetName()] = existingMF - } - for _, m := range mf.Metric { - if err := checkMetricConsistency(existingMF, m, metricHashes); err != nil { - errs = append(errs, err) - continue - } - existingMF.Metric = append(existingMF.Metric, m) - } - } - } - return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() -} - -// checkSuffixCollisions checks for collisions with the “magic” suffixes the -// Prometheus text format and the internal metric representation of the -// Prometheus server add while flattening Summaries and Histograms. 
-func checkSuffixCollisions(mf *dto.MetricFamily, mfs map[string]*dto.MetricFamily) error { - var ( - newName = mf.GetName() - newType = mf.GetType() - newNameWithoutSuffix = "" - ) - switch { - case strings.HasSuffix(newName, "_count"): - newNameWithoutSuffix = newName[:len(newName)-6] - case strings.HasSuffix(newName, "_sum"): - newNameWithoutSuffix = newName[:len(newName)-4] - case strings.HasSuffix(newName, "_bucket"): - newNameWithoutSuffix = newName[:len(newName)-7] - } - if newNameWithoutSuffix != "" { - if existingMF, ok := mfs[newNameWithoutSuffix]; ok { - switch existingMF.GetType() { - case dto.MetricType_SUMMARY: - if !strings.HasSuffix(newName, "_bucket") { - return fmt.Errorf( - "collected metric named %q collides with previously collected summary named %q", - newName, newNameWithoutSuffix, - ) - } - case dto.MetricType_HISTOGRAM: - return fmt.Errorf( - "collected metric named %q collides with previously collected histogram named %q", - newName, newNameWithoutSuffix, - ) - } - } - } - if newType == dto.MetricType_SUMMARY || newType == dto.MetricType_HISTOGRAM { - if _, ok := mfs[newName+"_count"]; ok { - return fmt.Errorf( - "collected histogram or summary named %q collides with previously collected metric named %q", - newName, newName+"_count", - ) - } - if _, ok := mfs[newName+"_sum"]; ok { - return fmt.Errorf( - "collected histogram or summary named %q collides with previously collected metric named %q", - newName, newName+"_sum", - ) - } - } - if newType == dto.MetricType_HISTOGRAM { - if _, ok := mfs[newName+"_bucket"]; ok { - return fmt.Errorf( - "collected histogram named %q collides with previously collected metric named %q", - newName, newName+"_bucket", - ) - } - } - return nil -} - -// checkMetricConsistency checks if the provided Metric is consistent with the -// provided MetricFamily. It also hashes the Metric labels and the MetricFamily -// name. If the resulting hash is already in the provided metricHashes, an error -// is returned. If not, it is added to metricHashes. -func checkMetricConsistency( - metricFamily *dto.MetricFamily, - dtoMetric *dto.Metric, - metricHashes map[uint64]struct{}, -) error { - name := metricFamily.GetName() - - // Type consistency with metric family. 
- if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil || - metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil || - metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil || - metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil || - metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil { - return fmt.Errorf( - "collected metric %q { %s} is not a %s", - name, dtoMetric, metricFamily.GetType(), - ) - } - - previousLabelName := "" - for _, labelPair := range dtoMetric.GetLabel() { - labelName := labelPair.GetName() - if labelName == previousLabelName { - return fmt.Errorf( - "collected metric %q { %s} has two or more labels with the same name: %s", - name, dtoMetric, labelName, - ) - } - if !checkLabelName(labelName) { - return fmt.Errorf( - "collected metric %q { %s} has a label with an invalid name: %s", - name, dtoMetric, labelName, - ) - } - if dtoMetric.Summary != nil && labelName == quantileLabel { - return fmt.Errorf( - "collected metric %q { %s} must not have an explicit %q label", - name, dtoMetric, quantileLabel, - ) - } - if !utf8.ValidString(labelPair.GetValue()) { - return fmt.Errorf( - "collected metric %q { %s} has a label named %q whose value is not utf8: %#v", - name, dtoMetric, labelName, labelPair.GetValue()) - } - previousLabelName = labelName - } - - // Is the metric unique (i.e. no other metric with the same name and the same labels)? - h := xxhash.New() - h.WriteString(name) - h.Write(separatorByteSlice) - // Make sure label pairs are sorted. We depend on it for the consistency - // check. - if !sort.IsSorted(internal.LabelPairSorter(dtoMetric.Label)) { - // We cannot sort dtoMetric.Label in place as it is immutable by contract. - copiedLabels := make([]*dto.LabelPair, len(dtoMetric.Label)) - copy(copiedLabels, dtoMetric.Label) - sort.Sort(internal.LabelPairSorter(copiedLabels)) - dtoMetric.Label = copiedLabels - } - for _, lp := range dtoMetric.Label { - h.WriteString(lp.GetName()) - h.Write(separatorByteSlice) - h.WriteString(lp.GetValue()) - h.Write(separatorByteSlice) - } - hSum := h.Sum64() - if _, exists := metricHashes[hSum]; exists { - return fmt.Errorf( - "collected metric %q { %s} was collected before with the same name and label values", - name, dtoMetric, - ) - } - metricHashes[hSum] = struct{}{} - return nil -} - -func checkDescConsistency( - metricFamily *dto.MetricFamily, - dtoMetric *dto.Metric, - desc *Desc, -) error { - // Desc help consistency with metric family help. - if metricFamily.GetHelp() != desc.help { - return fmt.Errorf( - "collected metric %s %s has help %q but should have %q", - metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help, - ) - } - - // Is the desc consistent with the content of the metric? 
- lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label)) - copy(lpsFromDesc, desc.constLabelPairs) - for _, l := range desc.variableLabels { - lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ - Name: proto.String(l), - }) - } - if len(lpsFromDesc) != len(dtoMetric.Label) { - return fmt.Errorf( - "labels in collected metric %s %s are inconsistent with descriptor %s", - metricFamily.GetName(), dtoMetric, desc, - ) - } - sort.Sort(internal.LabelPairSorter(lpsFromDesc)) - for i, lpFromDesc := range lpsFromDesc { - lpFromMetric := dtoMetric.Label[i] - if lpFromDesc.GetName() != lpFromMetric.GetName() || - lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() { - return fmt.Errorf( - "labels in collected metric %s %s are inconsistent with descriptor %s", - metricFamily.GetName(), dtoMetric, desc, - ) - } - } - return nil -} - -var _ TransactionalGatherer = &MultiTRegistry{} - -// MultiTRegistry is a TransactionalGatherer that joins gathered metrics from multiple -// transactional gatherers. -// -// It is caller responsibility to ensure two registries have mutually exclusive metric families, -// no deduplication will happen. -type MultiTRegistry struct { - tGatherers []TransactionalGatherer -} - -// NewMultiTRegistry creates MultiTRegistry. -func NewMultiTRegistry(tGatherers ...TransactionalGatherer) *MultiTRegistry { - return &MultiTRegistry{ - tGatherers: tGatherers, - } -} - -// Gather implements TransactionalGatherer interface. -func (r *MultiTRegistry) Gather() (mfs []*dto.MetricFamily, done func(), err error) { - errs := MultiError{} - - dFns := make([]func(), 0, len(r.tGatherers)) - // TODO(bwplotka): Implement concurrency for those? - for _, g := range r.tGatherers { - // TODO(bwplotka): Check for duplicates? - m, d, err := g.Gather() - errs.Append(err) - - mfs = append(mfs, m...) - dFns = append(dFns, d) - } - - // TODO(bwplotka): Consider sort in place, given metric family in gather is sorted already. - sort.Slice(mfs, func(i, j int) bool { - return *mfs[i].Name < *mfs[j].Name - }) - return mfs, func() { - for _, d := range dFns { - d() - } - }, errs.MaybeUnwrap() -} - -// TransactionalGatherer represents transactional gatherer that can be triggered to notify gatherer that memory -// used by metric family is no longer used by a caller. This allows implementations with cache. -type TransactionalGatherer interface { - // Gather returns metrics in a lexicographically sorted slice - // of uniquely named MetricFamily protobufs. Gather ensures that the - // returned slice is valid and self-consistent so that it can be used - // for valid exposition. As an exception to the strict consistency - // requirements described for metric.Desc, Gather will tolerate - // different sets of label names for metrics of the same metric family. - // - // Even if an error occurs, Gather attempts to gather as many metrics as - // possible. Hence, if a non-nil error is returned, the returned - // MetricFamily slice could be nil (in case of a fatal error that - // prevented any meaningful metric collection) or contain a number of - // MetricFamily protobufs, some of which might be incomplete, and some - // might be missing altogether. The returned error (which might be a - // MultiError) explains the details. Note that this is mostly useful for - // debugging purposes. 
If the gathered protobufs are to be used for - // exposition in actual monitoring, it is almost always better to not - // expose an incomplete result and instead disregard the returned - // MetricFamily protobufs in case the returned error is non-nil. - // - // Important: done is expected to be triggered (even if the error occurs!) - // once caller does not need returned slice of dto.MetricFamily. - Gather() (_ []*dto.MetricFamily, done func(), err error) -} - -// ToTransactionalGatherer transforms Gatherer to transactional one with noop as done function. -func ToTransactionalGatherer(g Gatherer) TransactionalGatherer { - return &noTransactionGatherer{g: g} -} - -type noTransactionGatherer struct { - g Gatherer -} - -// Gather implements TransactionalGatherer interface. -func (g *noTransactionGatherer) Gather() (_ []*dto.MetricFamily, done func(), err error) { - mfs, err := g.g.Gather() - return mfs, func() {}, err -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go deleted file mode 100644 index 7bc448a89..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ /dev/null @@ -1,747 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "fmt" - "math" - "runtime" - "sort" - "sync" - "sync/atomic" - "time" - - "github.com/beorn7/perks/quantile" - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - - dto "github.com/prometheus/client_model/go" -) - -// quantileLabel is used for the label that defines the quantile in a -// summary. -const quantileLabel = "quantile" - -// A Summary captures individual observations from an event or sample stream and -// summarizes them in a manner similar to traditional summary statistics: 1. sum -// of observations, 2. observation count, 3. rank estimations. -// -// A typical use-case is the observation of request latencies. By default, a -// Summary provides the median, the 90th and the 99th percentile of the latency -// as rank estimations. However, the default behavior will change in the -// upcoming v1.0.0 of the library. There will be no rank estimations at all by -// default. For a sane transition, it is recommended to set the desired rank -// estimations explicitly. -// -// Note that the rank estimations cannot be aggregated in a meaningful way with -// the Prometheus query language (i.e. you cannot average or add them). If you -// need aggregatable quantiles (e.g. you want the 99th percentile latency of all -// queries served across all instances of a service), consider the Histogram -// metric type. See the Prometheus documentation for more details. -// -// To create Summary instances, use NewSummary. -type Summary interface { - Metric - Collector - - // Observe adds a single observation to the summary. Observations are - // usually positive or zero. 
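A minimal sketch of the transactional-gatherer API shown above, assuming two ordinary registries with mutually exclusive metric families (the registry and log output are illustrative only):

package main

import (
	"log"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	regA := prometheus.NewRegistry()
	regB := prometheus.NewRegistry()
	// ToTransactionalGatherer wraps a plain Gatherer with a no-op done function,
	// exactly as the adapter above does.
	multi := prometheus.NewMultiTRegistry(
		prometheus.ToTransactionalGatherer(regA),
		prometheus.ToTransactionalGatherer(regB),
	)
	mfs, done, err := multi.Gather()
	defer done() // the contract above asks for done to run even when err != nil
	if err != nil {
		log.Println("gather returned a (possibly partial) error:", err)
	}
	log.Printf("gathered %d metric families", len(mfs))
}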
Negative observations are accepted but - // prevent current versions of Prometheus from properly detecting - // counter resets in the sum of observations. See - // https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations - // for details. - Observe(float64) -} - -var errQuantileLabelNotAllowed = fmt.Errorf( - "%q is not allowed as label name in summaries", quantileLabel, -) - -// Default values for SummaryOpts. -const ( - // DefMaxAge is the default duration for which observations stay - // relevant. - DefMaxAge time.Duration = 10 * time.Minute - // DefAgeBuckets is the default number of buckets used to calculate the - // age of observations. - DefAgeBuckets = 5 - // DefBufCap is the standard buffer size for collecting Summary observations. - DefBufCap = 500 -) - -// SummaryOpts bundles the options for creating a Summary metric. It is -// mandatory to set Name to a non-empty string. While all other fields are -// optional and can safely be left at their zero value, it is recommended to set -// a help string and to explicitly set the Objectives field to the desired value -// as the default value will change in the upcoming v1.0.0 of the library. -type SummaryOpts struct { - // Namespace, Subsystem, and Name are components of the fully-qualified - // name of the Summary (created by joining these components with - // "_"). Only Name is mandatory, the others merely help structuring the - // name. Note that the fully-qualified name of the Summary must be a - // valid Prometheus metric name. - Namespace string - Subsystem string - Name string - - // Help provides information about this Summary. - // - // Metrics with the same fully-qualified name must have the same Help - // string. - Help string - - // ConstLabels are used to attach fixed labels to this metric. Metrics - // with the same fully-qualified name must have the same label names in - // their ConstLabels. - // - // Due to the way a Summary is represented in the Prometheus text format - // and how it is handled by the Prometheus server internally, “quantile” - // is an illegal label name. Construction of a Summary or SummaryVec - // will panic if this label name is used in ConstLabels. - // - // ConstLabels are only used rarely. In particular, do not use them to - // attach the same labels to all your metrics. Those use cases are - // better covered by target labels set by the scraping Prometheus - // server, or by one specific metric (e.g. a build_info or a - // machine_role metric). See also - // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels - ConstLabels Labels - - // Objectives defines the quantile rank estimates with their respective - // absolute error. If Objectives[q] = e, then the value reported for q - // will be the φ-quantile value for some φ between q-e and q+e. The - // default value is an empty map, resulting in a summary without - // quantiles. - Objectives map[float64]float64 - - // MaxAge defines the duration for which an observation stays relevant - // for the summary. Only applies to pre-calculated quantiles, does not - // apply to _sum and _count. Must be positive. The default value is - // DefMaxAge. - MaxAge time.Duration - - // AgeBuckets is the number of buckets used to exclude observations that - // are older than MaxAge from the summary. A higher number has a - // resource penalty, so only increase it if the higher resolution is - // really required. 
For very high observation rates, you might want to - // reduce the number of age buckets. With only one age bucket, you will - // effectively see a complete reset of the summary each time MaxAge has - // passed. The default value is DefAgeBuckets. - AgeBuckets uint32 - - // BufCap defines the default sample stream buffer size. The default - // value of DefBufCap should suffice for most uses. If there is a need - // to increase the value, a multiple of 500 is recommended (because that - // is the internal buffer size of the underlying package - // "github.com/bmizerany/perks/quantile"). - BufCap uint32 -} - -// Problem with the sliding-window decay algorithm... The Merge method of -// perk/quantile is actually not working as advertised - and it might be -// unfixable, as the underlying algorithm is apparently not capable of merging -// summaries in the first place. To avoid using Merge, we are currently adding -// observations to _each_ age bucket, i.e. the effort to add a sample is -// essentially multiplied by the number of age buckets. When rotating age -// buckets, we empty the previous head stream. On scrape time, we simply take -// the quantiles from the head stream (no merging required). Result: More effort -// on observation time, less effort on scrape time, which is exactly the -// opposite of what we try to accomplish, but at least the results are correct. -// -// The quite elegant previous contraption to merge the age buckets efficiently -// on scrape time (see code up commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0) -// can't be used anymore. - -// NewSummary creates a new Summary based on the provided SummaryOpts. -func NewSummary(opts SummaryOpts) Summary { - return newSummary( - NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), - opts, - ) -} - -func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { - if len(desc.variableLabels) != len(labelValues) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) - } - - for _, n := range desc.variableLabels { - if n == quantileLabel { - panic(errQuantileLabelNotAllowed) - } - } - for _, lp := range desc.constLabelPairs { - if lp.GetName() == quantileLabel { - panic(errQuantileLabelNotAllowed) - } - } - - if opts.Objectives == nil { - opts.Objectives = map[float64]float64{} - } - - if opts.MaxAge < 0 { - panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge)) - } - if opts.MaxAge == 0 { - opts.MaxAge = DefMaxAge - } - - if opts.AgeBuckets == 0 { - opts.AgeBuckets = DefAgeBuckets - } - - if opts.BufCap == 0 { - opts.BufCap = DefBufCap - } - - if len(opts.Objectives) == 0 { - // Use the lock-free implementation of a Summary without objectives. - s := &noObjectivesSummary{ - desc: desc, - labelPairs: MakeLabelPairs(desc, labelValues), - counts: [2]*summaryCounts{{}, {}}, - } - s.init(s) // Init self-collection. 
- return s - } - - s := &summary{ - desc: desc, - - objectives: opts.Objectives, - sortedObjectives: make([]float64, 0, len(opts.Objectives)), - - labelPairs: MakeLabelPairs(desc, labelValues), - - hotBuf: make([]float64, 0, opts.BufCap), - coldBuf: make([]float64, 0, opts.BufCap), - streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets), - } - s.headStreamExpTime = time.Now().Add(s.streamDuration) - s.hotBufExpTime = s.headStreamExpTime - - for i := uint32(0); i < opts.AgeBuckets; i++ { - s.streams = append(s.streams, s.newStream()) - } - s.headStream = s.streams[0] - - for qu := range s.objectives { - s.sortedObjectives = append(s.sortedObjectives, qu) - } - sort.Float64s(s.sortedObjectives) - - s.init(s) // Init self-collection. - return s -} - -type summary struct { - selfCollector - - bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime. - mtx sync.Mutex // Protects every other moving part. - // Lock bufMtx before mtx if both are needed. - - desc *Desc - - objectives map[float64]float64 - sortedObjectives []float64 - - labelPairs []*dto.LabelPair - - sum float64 - cnt uint64 - - hotBuf, coldBuf []float64 - - streams []*quantile.Stream - streamDuration time.Duration - headStream *quantile.Stream - headStreamIdx int - headStreamExpTime, hotBufExpTime time.Time -} - -func (s *summary) Desc() *Desc { - return s.desc -} - -func (s *summary) Observe(v float64) { - s.bufMtx.Lock() - defer s.bufMtx.Unlock() - - now := time.Now() - if now.After(s.hotBufExpTime) { - s.asyncFlush(now) - } - s.hotBuf = append(s.hotBuf, v) - if len(s.hotBuf) == cap(s.hotBuf) { - s.asyncFlush(now) - } -} - -func (s *summary) Write(out *dto.Metric) error { - sum := &dto.Summary{} - qs := make([]*dto.Quantile, 0, len(s.objectives)) - - s.bufMtx.Lock() - s.mtx.Lock() - // Swap bufs even if hotBuf is empty to set new hotBufExpTime. - s.swapBufs(time.Now()) - s.bufMtx.Unlock() - - s.flushColdBuf() - sum.SampleCount = proto.Uint64(s.cnt) - sum.SampleSum = proto.Float64(s.sum) - - for _, rank := range s.sortedObjectives { - var q float64 - if s.headStream.Count() == 0 { - q = math.NaN() - } else { - q = s.headStream.Query(rank) - } - qs = append(qs, &dto.Quantile{ - Quantile: proto.Float64(rank), - Value: proto.Float64(q), - }) - } - - s.mtx.Unlock() - - if len(qs) > 0 { - sort.Sort(quantSort(qs)) - } - sum.Quantile = qs - - out.Summary = sum - out.Label = s.labelPairs - return nil -} - -func (s *summary) newStream() *quantile.Stream { - return quantile.NewTargeted(s.objectives) -} - -// asyncFlush needs bufMtx locked. -func (s *summary) asyncFlush(now time.Time) { - s.mtx.Lock() - s.swapBufs(now) - - // Unblock the original goroutine that was responsible for the mutation - // that triggered the compaction. But hold onto the global non-buffer - // state mutex until the operation finishes. - go func() { - s.flushColdBuf() - s.mtx.Unlock() - }() -} - -// rotateStreams needs mtx AND bufMtx locked. -func (s *summary) maybeRotateStreams() { - for !s.hotBufExpTime.Equal(s.headStreamExpTime) { - s.headStream.Reset() - s.headStreamIdx++ - if s.headStreamIdx >= len(s.streams) { - s.headStreamIdx = 0 - } - s.headStream = s.streams[s.headStreamIdx] - s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration) - } -} - -// flushColdBuf needs mtx locked. 
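Since the SummaryOpts documentation above recommends setting Objectives explicitly, here is a hedged usage sketch; the metric name, quantile objectives, and MaxAge are illustrative only.

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Objectives set explicitly, as recommended above.
	latency := prometheus.NewSummary(prometheus.SummaryOpts{
		Name:       "example_request_duration_seconds",
		Help:       "Request latency distribution.",
		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
		MaxAge:     5 * time.Minute,
	})
	prometheus.MustRegister(latency)
	latency.Observe(0.42)
}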
-func (s *summary) flushColdBuf() { - for _, v := range s.coldBuf { - for _, stream := range s.streams { - stream.Insert(v) - } - s.cnt++ - s.sum += v - } - s.coldBuf = s.coldBuf[0:0] - s.maybeRotateStreams() -} - -// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty. -func (s *summary) swapBufs(now time.Time) { - if len(s.coldBuf) != 0 { - panic("coldBuf is not empty") - } - s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf - // hotBuf is now empty and gets new expiration set. - for now.After(s.hotBufExpTime) { - s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration) - } -} - -type summaryCounts struct { - // sumBits contains the bits of the float64 representing the sum of all - // observations. sumBits and count have to go first in the struct to - // guarantee alignment for atomic operations. - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG - sumBits uint64 - count uint64 -} - -type noObjectivesSummary struct { - // countAndHotIdx enables lock-free writes with use of atomic updates. - // The most significant bit is the hot index [0 or 1] of the count field - // below. Observe calls update the hot one. All remaining bits count the - // number of Observe calls. Observe starts by incrementing this counter, - // and finish by incrementing the count field in the respective - // summaryCounts, as a marker for completion. - // - // Calls of the Write method (which are non-mutating reads from the - // perspective of the summary) swap the hot–cold under the writeMtx - // lock. A cooldown is awaited (while locked) by comparing the number of - // observations with the initiation count. Once they match, then the - // last observation on the now cool one has completed. All cool fields must - // be merged into the new hot before releasing writeMtx. - - // Fields with atomic access first! See alignment constraint: - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG - countAndHotIdx uint64 - - selfCollector - desc *Desc - writeMtx sync.Mutex // Only used in the Write method. - - // Two counts, one is "hot" for lock-free observations, the other is - // "cold" for writing out a dto.Metric. It has to be an array of - // pointers to guarantee 64bit alignment of the histogramCounts, see - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG. - counts [2]*summaryCounts - - labelPairs []*dto.LabelPair -} - -func (s *noObjectivesSummary) Desc() *Desc { - return s.desc -} - -func (s *noObjectivesSummary) Observe(v float64) { - // We increment h.countAndHotIdx so that the counter in the lower - // 63 bits gets incremented. At the same time, we get the new value - // back, which we can use to find the currently-hot counts. - n := atomic.AddUint64(&s.countAndHotIdx, 1) - hotCounts := s.counts[n>>63] - - for { - oldBits := atomic.LoadUint64(&hotCounts.sumBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + v) - if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { - break - } - } - // Increment count last as we take it as a signal that the observation - // is complete. - atomic.AddUint64(&hotCounts.count, 1) -} - -func (s *noObjectivesSummary) Write(out *dto.Metric) error { - // For simplicity, we protect this whole method by a mutex. It is not in - // the hot path, i.e. Observe is called much more often than Write. The - // complication of making Write lock-free isn't worth it, if possible at - // all. - s.writeMtx.Lock() - defer s.writeMtx.Unlock() - - // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0) - // without touching the count bits. 
See the struct comments for a full - // description of the algorithm. - n := atomic.AddUint64(&s.countAndHotIdx, 1<<63) - // count is contained unchanged in the lower 63 bits. - count := n & ((1 << 63) - 1) - // The most significant bit tells us which counts is hot. The complement - // is thus the cold one. - hotCounts := s.counts[n>>63] - coldCounts := s.counts[(^n)>>63] - - // Await cooldown. - for count != atomic.LoadUint64(&coldCounts.count) { - runtime.Gosched() // Let observations get work done. - } - - sum := &dto.Summary{ - SampleCount: proto.Uint64(count), - SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), - } - - out.Summary = sum - out.Label = s.labelPairs - - // Finally add all the cold counts to the new hot counts and reset the cold counts. - atomic.AddUint64(&hotCounts.count, count) - atomic.StoreUint64(&coldCounts.count, 0) - for { - oldBits := atomic.LoadUint64(&hotCounts.sumBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + sum.GetSampleSum()) - if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { - atomic.StoreUint64(&coldCounts.sumBits, 0) - break - } - } - return nil -} - -type quantSort []*dto.Quantile - -func (s quantSort) Len() int { - return len(s) -} - -func (s quantSort) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s quantSort) Less(i, j int) bool { - return s[i].GetQuantile() < s[j].GetQuantile() -} - -// SummaryVec is a Collector that bundles a set of Summaries that all share the -// same Desc, but have different values for their variable labels. This is used -// if you want to count the same thing partitioned by various dimensions -// (e.g. HTTP request latencies, partitioned by status code and method). Create -// instances with NewSummaryVec. -type SummaryVec struct { - *MetricVec -} - -// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and -// partitioned by the given label names. -// -// Due to the way a Summary is represented in the Prometheus text format and how -// it is handled by the Prometheus server internally, “quantile” is an illegal -// label name. NewSummaryVec will panic if this label name is used. -func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { - for _, ln := range labelNames { - if ln == quantileLabel { - panic(errQuantileLabelNotAllowed) - } - } - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - labelNames, - opts.ConstLabels, - ) - return &SummaryVec{ - MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { - return newSummary(desc, opts, lvs...) - }), - } -} - -// GetMetricWithLabelValues returns the Summary for the given slice of label -// values (same order as the variable labels in Desc). If that combination of -// label values is accessed for the first time, a new Summary is created. -// -// It is possible to call this method without using the returned Summary to only -// create the new Summary but leave it at its starting value, a Summary without -// any observations. -// -// Keeping the Summary for later use is possible (and should be considered if -// performance is critical), but keep in mind that Reset, DeleteLabelValues and -// Delete can be used to delete the Summary from the SummaryVec. In that case, -// the Summary will still exist, but it will not be exported anymore, even if a -// Summary with the same label values is created later. See also the CounterVec -// example. 
-// -// An error is returned if the number of label values is not the same as the -// number of variable labels in Desc (minus any curried labels). -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as -// an alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// See also the GaugeVec example. -func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { - metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...) - if metric != nil { - return metric.(Observer), err - } - return nil, err -} - -// GetMetricWith returns the Summary for the given Labels map (the label names -// must match those of the variable labels in Desc). If that label map is -// accessed for the first time, a new Summary is created. Implications of -// creating a Summary without using it and keeping the Summary for later use are -// the same as for GetMetricWithLabelValues. -// -// An error is returned if the number and names of the Labels are inconsistent -// with those of the variable labels in Desc (minus any curried labels). -// -// This method is used for the same purpose as -// GetMetricWithLabelValues(...string). See there for pros and cons of the two -// methods. -func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) { - metric, err := v.MetricVec.GetMetricWith(labels) - if metric != nil { - return metric.(Observer), err - } - return nil, err -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. Not returning an -// error allows shortcuts like -// -// myVec.WithLabelValues("404", "GET").Observe(42.21) -func (v *SummaryVec) WithLabelValues(lvs ...string) Observer { - s, err := v.GetMetricWithLabelValues(lvs...) - if err != nil { - panic(err) - } - return s -} - -// With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. Not returning an error allows shortcuts like -// -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) -func (v *SummaryVec) With(labels Labels) Observer { - s, err := v.GetMetricWith(labels) - if err != nil { - panic(err) - } - return s -} - -// CurryWith returns a vector curried with the provided labels, i.e. the -// returned vector has those labels pre-set for all labeled operations performed -// on it. The cardinality of the curried vector is reduced accordingly. The -// order of the remaining labels stays the same (just with the curried labels -// taken out of the sequence – which is relevant for the -// (GetMetric)WithLabelValues methods). It is possible to curry a curried -// vector, but only with labels not yet used for currying before. -// -// The metrics contained in the SummaryVec are shared between the curried and -// uncurried vectors. They are just accessed differently. Curried and uncurried -// vectors behave identically in terms of collection. Only one must be -// registered with a given registry (usually the uncurried version). The Reset -// method deletes all metrics, even if called on a curried vector. 
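To tie the SummaryVec and CurryWith documentation above together, a small hedged sketch; the metric name, label names, and values are assumptions for illustration.

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// Metric name, label names and values are invented for this sketch.
	latencies := prometheus.NewSummaryVec(prometheus.SummaryOpts{
		Name:       "example_http_request_duration_seconds",
		Help:       "Request latency partitioned by status code and method.",
		Objectives: map[float64]float64{0.5: 0.05, 0.99: 0.001},
	}, []string{"code", "method"})
	prometheus.MustRegister(latencies)

	// Direct access, as in the WithLabelValues shortcut documented above.
	latencies.WithLabelValues("404", "GET").Observe(0.042)

	// Currying pre-sets one label; the curried and uncurried vectors share
	// their metrics, as the CurryWith doc above describes.
	getOnly := latencies.MustCurryWith(prometheus.Labels{"method": "GET"})
	getOnly.WithLabelValues("200").Observe(0.021)
}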
-func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) { - vec, err := v.MetricVec.CurryWith(labels) - if vec != nil { - return &SummaryVec{vec}, err - } - return nil, err -} - -// MustCurryWith works as CurryWith but panics where CurryWith would have -// returned an error. -func (v *SummaryVec) MustCurryWith(labels Labels) ObserverVec { - vec, err := v.CurryWith(labels) - if err != nil { - panic(err) - } - return vec -} - -type constSummary struct { - desc *Desc - count uint64 - sum float64 - quantiles map[float64]float64 - labelPairs []*dto.LabelPair -} - -func (s *constSummary) Desc() *Desc { - return s.desc -} - -func (s *constSummary) Write(out *dto.Metric) error { - sum := &dto.Summary{} - qs := make([]*dto.Quantile, 0, len(s.quantiles)) - - sum.SampleCount = proto.Uint64(s.count) - sum.SampleSum = proto.Float64(s.sum) - - for rank, q := range s.quantiles { - qs = append(qs, &dto.Quantile{ - Quantile: proto.Float64(rank), - Value: proto.Float64(q), - }) - } - - if len(qs) > 0 { - sort.Sort(quantSort(qs)) - } - sum.Quantile = qs - - out.Summary = sum - out.Label = s.labelPairs - - return nil -} - -// NewConstSummary returns a metric representing a Prometheus summary with fixed -// values for the count, sum, and quantiles. As those parameters cannot be -// changed, the returned value does not implement the Summary interface (but -// only the Metric interface). Users of this package will not have much use for -// it in regular operations. However, when implementing custom Collectors, it is -// useful as a throw-away metric that is generated on the fly to send it to -// Prometheus in the Collect method. -// -// quantiles maps ranks to quantile values. For example, a median latency of -// 0.23s and a 99th percentile latency of 0.56s would be expressed as: -// -// map[float64]float64{0.5: 0.23, 0.99: 0.56} -// -// NewConstSummary returns an error if the length of labelValues is not -// consistent with the variable labels in Desc or if Desc is invalid. -func NewConstSummary( - desc *Desc, - count uint64, - sum float64, - quantiles map[float64]float64, - labelValues ...string, -) (Metric, error) { - if desc.err != nil { - return nil, desc.err - } - if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { - return nil, err - } - return &constSummary{ - desc: desc, - count: count, - sum: sum, - quantiles: quantiles, - labelPairs: MakeLabelPairs(desc, labelValues), - }, nil -} - -// MustNewConstSummary is a version of NewConstSummary that panics where -// NewConstMetric would have returned an error. -func MustNewConstSummary( - desc *Desc, - count uint64, - sum float64, - quantiles map[float64]float64, - labelValues ...string, -) Metric { - m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...) - if err != nil { - panic(err) - } - return m -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go deleted file mode 100644 index f28a76f3a..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/timer.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2016 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import "time" - -// Timer is a helper type to time functions. Use NewTimer to create new -// instances. -type Timer struct { - begin time.Time - observer Observer -} - -// NewTimer creates a new Timer. The provided Observer is used to observe a -// duration in seconds. Timer is usually used to time a function call in the -// following way: -// -// func TimeMe() { -// timer := NewTimer(myHistogram) -// defer timer.ObserveDuration() -// // Do actual work. -// } -func NewTimer(o Observer) *Timer { - return &Timer{ - begin: time.Now(), - observer: o, - } -} - -// ObserveDuration records the duration passed since the Timer was created with -// NewTimer. It calls the Observe method of the Observer provided during -// construction with the duration in seconds as an argument. The observed -// duration is also returned. ObserveDuration is usually called with a defer -// statement. -// -// Note that this method is only guaranteed to never observe negative durations -// if used with Go1.9+. -func (t *Timer) ObserveDuration() time.Duration { - d := time.Since(t.begin) - if t.observer != nil { - t.observer.Observe(d.Seconds()) - } - return d -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go deleted file mode 100644 index 0f9ce63f4..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -// UntypedOpts is an alias for Opts. See there for doc comments. -type UntypedOpts Opts - -// UntypedFunc works like GaugeFunc but the collected metric is of type -// "Untyped". UntypedFunc is useful to mirror an external metric of unknown -// type. -// -// To create UntypedFunc instances, use NewUntypedFunc. -type UntypedFunc interface { - Metric - Collector -} - -// NewUntypedFunc creates a new UntypedFunc based on the provided -// UntypedOpts. The value reported is determined by calling the given function -// from within the Write method. Take into account that metric collection may -// happen concurrently. If that results in concurrent calls to Write, like in -// the case where an UntypedFunc is directly registered with Prometheus, the -// provided function must be concurrency-safe. 
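A hedged example of the UntypedFunc constructor documented above; the mirrored value and metric name are invented, and the closure must be safe for concurrent calls, as noted.

package main

import (
	"sync/atomic"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// The mirrored value and metric name are assumptions for illustration.
	var external int64
	prometheus.MustRegister(prometheus.NewUntypedFunc(prometheus.UntypedOpts{
		Name: "example_external_value",
		Help: "Value mirrored from an external source of unknown type.",
	}, func() float64 {
		return float64(atomic.LoadInt64(&external))
	}))
	atomic.AddInt64(&external, 3)
}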
-func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc { - return newValueFunc(NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), UntypedValue, function) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go deleted file mode 100644 index 2d3abc1cb..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/value.go +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "fmt" - "sort" - "time" - "unicode/utf8" - - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - "google.golang.org/protobuf/types/known/timestamppb" - - "github.com/prometheus/client_golang/prometheus/internal" - - dto "github.com/prometheus/client_model/go" -) - -// ValueType is an enumeration of metric types that represent a simple value. -type ValueType int - -// Possible values for the ValueType enum. Use UntypedValue to mark a metric -// with an unknown type. -const ( - _ ValueType = iota - CounterValue - GaugeValue - UntypedValue -) - -var ( - CounterMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_COUNTER; return &d }() - GaugeMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_GAUGE; return &d }() - UntypedMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_UNTYPED; return &d }() -) - -func (v ValueType) ToDTO() *dto.MetricType { - switch v { - case CounterValue: - return CounterMetricTypePtr - case GaugeValue: - return GaugeMetricTypePtr - default: - return UntypedMetricTypePtr - } -} - -// valueFunc is a generic metric for simple values retrieved on collect time -// from a function. It implements Metric and Collector. Its effective type is -// determined by ValueType. This is a low-level building block used by the -// library to back the implementations of CounterFunc, GaugeFunc, and -// UntypedFunc. -type valueFunc struct { - selfCollector - - desc *Desc - valType ValueType - function func() float64 - labelPairs []*dto.LabelPair -} - -// newValueFunc returns a newly allocated valueFunc with the given Desc and -// ValueType. The value reported is determined by calling the given function -// from within the Write method. Take into account that metric collection may -// happen concurrently. If that results in concurrent calls to Write, like in -// the case where a valueFunc is directly registered with Prometheus, the -// provided function must be concurrency-safe. 
-func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc { - result := &valueFunc{ - desc: desc, - valType: valueType, - function: function, - labelPairs: MakeLabelPairs(desc, nil), - } - result.init(result) - return result -} - -func (v *valueFunc) Desc() *Desc { - return v.desc -} - -func (v *valueFunc) Write(out *dto.Metric) error { - return populateMetric(v.valType, v.function(), v.labelPairs, nil, out) -} - -// NewConstMetric returns a metric with one fixed value that cannot be -// changed. Users of this package will not have much use for it in regular -// operations. However, when implementing custom Collectors, it is useful as a -// throw-away metric that is generated on the fly to send it to Prometheus in -// the Collect method. NewConstMetric returns an error if the length of -// labelValues is not consistent with the variable labels in Desc or if Desc is -// invalid. -func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) { - if desc.err != nil { - return nil, desc.err - } - if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { - return nil, err - } - - metric := &dto.Metric{} - if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric); err != nil { - return nil, err - } - - return &constMetric{ - desc: desc, - metric: metric, - }, nil -} - -// MustNewConstMetric is a version of NewConstMetric that panics where -// NewConstMetric would have returned an error. -func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric { - m, err := NewConstMetric(desc, valueType, value, labelValues...) - if err != nil { - panic(err) - } - return m -} - -type constMetric struct { - desc *Desc - metric *dto.Metric -} - -func (m *constMetric) Desc() *Desc { - return m.desc -} - -func (m *constMetric) Write(out *dto.Metric) error { - out.Label = m.metric.Label - out.Counter = m.metric.Counter - out.Gauge = m.metric.Gauge - out.Untyped = m.metric.Untyped - return nil -} - -func populateMetric( - t ValueType, - v float64, - labelPairs []*dto.LabelPair, - e *dto.Exemplar, - m *dto.Metric, -) error { - m.Label = labelPairs - switch t { - case CounterValue: - m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e} - case GaugeValue: - m.Gauge = &dto.Gauge{Value: proto.Float64(v)} - case UntypedValue: - m.Untyped = &dto.Untyped{Value: proto.Float64(v)} - default: - return fmt.Errorf("encountered unknown type %v", t) - } - return nil -} - -// MakeLabelPairs is a helper function to create protobuf LabelPairs from the -// variable and constant labels in the provided Desc. The values for the -// variable labels are defined by the labelValues slice, which must be in the -// same order as the corresponding variable labels in the Desc. -// -// This function is only needed for custom Metric implementations. See MetricVec -// example. -func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { - totalLen := len(desc.variableLabels) + len(desc.constLabelPairs) - if totalLen == 0 { - // Super fast path. - return nil - } - if len(desc.variableLabels) == 0 { - // Moderately fast path. - return desc.constLabelPairs - } - labelPairs := make([]*dto.LabelPair, 0, totalLen) - for i, n := range desc.variableLabels { - labelPairs = append(labelPairs, &dto.LabelPair{ - Name: proto.String(n), - Value: proto.String(labelValues[i]), - }) - } - labelPairs = append(labelPairs, desc.constLabelPairs...) 
- sort.Sort(internal.LabelPairSorter(labelPairs)) - return labelPairs -} - -// ExemplarMaxRunes is the max total number of runes allowed in exemplar labels. -const ExemplarMaxRunes = 128 - -// newExemplar creates a new dto.Exemplar from the provided values. An error is -// returned if any of the label names or values are invalid or if the total -// number of runes in the label names and values exceeds ExemplarMaxRunes. -func newExemplar(value float64, ts time.Time, l Labels) (*dto.Exemplar, error) { - e := &dto.Exemplar{} - e.Value = proto.Float64(value) - tsProto := timestamppb.New(ts) - if err := tsProto.CheckValid(); err != nil { - return nil, err - } - e.Timestamp = tsProto - labelPairs := make([]*dto.LabelPair, 0, len(l)) - var runes int - for name, value := range l { - if !checkLabelName(name) { - return nil, fmt.Errorf("exemplar label name %q is invalid", name) - } - runes += utf8.RuneCountInString(name) - if !utf8.ValidString(value) { - return nil, fmt.Errorf("exemplar label value %q is not valid UTF-8", value) - } - runes += utf8.RuneCountInString(value) - labelPairs = append(labelPairs, &dto.LabelPair{ - Name: proto.String(name), - Value: proto.String(value), - }) - } - if runes > ExemplarMaxRunes { - return nil, fmt.Errorf("exemplar labels have %d runes, exceeding the limit of %d", runes, ExemplarMaxRunes) - } - e.Label = labelPairs - return e, nil -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go deleted file mode 100644 index 7ae322590..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ /dev/null @@ -1,642 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "fmt" - "sync" - - "github.com/prometheus/common/model" -) - -// MetricVec is a Collector to bundle metrics of the same name that differ in -// their label values. MetricVec is not used directly but as a building block -// for implementations of vectors of a given metric type, like GaugeVec, -// CounterVec, SummaryVec, and HistogramVec. It is exported so that it can be -// used for custom Metric implementations. -// -// To create a FooVec for custom Metric Foo, embed a pointer to MetricVec in -// FooVec and initialize it with NewMetricVec. Implement wrappers for -// GetMetricWithLabelValues and GetMetricWith that return (Foo, error) rather -// than (Metric, error). Similarly, create a wrapper for CurryWith that returns -// (*FooVec, error) rather than (*MetricVec, error). It is recommended to also -// add the convenience methods WithLabelValues, With, and MustCurryWith, which -// panic instead of returning errors. See also the MetricVec example. -type MetricVec struct { - *metricMap - - curry []curriedLabelValue - - // hashAdd and hashAddByte can be replaced for testing collision handling. 
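The const-metric helpers above are typically used from a custom Collector; a hedged sketch follows. The queue label, metric name, and fixed value are invented, and NewDesc comes from elsewhere in this package.

package main

import "github.com/prometheus/client_golang/prometheus"

// queueCollector is a hypothetical custom Collector built on Desc and
// MustNewConstMetric; nothing here is prescribed by the code above.
type queueCollector struct {
	depth *prometheus.Desc
}

func newQueueCollector() *queueCollector {
	return &queueCollector{
		depth: prometheus.NewDesc(
			"example_queue_depth",
			"Current depth of the work queue.",
			[]string{"queue"}, nil,
		),
	}
}

func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.depth }

func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
	// A throw-away metric generated on the fly, as the NewConstMetric doc
	// above suggests for custom Collectors.
	ch <- prometheus.MustNewConstMetric(c.depth, prometheus.GaugeValue, 42, "ingest")
}

func main() {
	prometheus.MustRegister(newQueueCollector())
}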
- hashAdd func(h uint64, s string) uint64 - hashAddByte func(h uint64, b byte) uint64 -} - -// NewMetricVec returns an initialized metricVec. -func NewMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec { - return &MetricVec{ - metricMap: &metricMap{ - metrics: map[uint64][]metricWithLabelValues{}, - desc: desc, - newMetric: newMetric, - }, - hashAdd: hashAdd, - hashAddByte: hashAddByte, - } -} - -// DeleteLabelValues removes the metric where the variable labels are the same -// as those passed in as labels (same order as the VariableLabels in Desc). It -// returns true if a metric was deleted. -// -// It is not an error if the number of label values is not the same as the -// number of VariableLabels in Desc. However, such inconsistent label count can -// never match an actual metric, so the method will always return false in that -// case. -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider Delete(Labels) as an -// alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// See also the CounterVec example. -func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { - h, err := m.hashLabelValues(lvs) - if err != nil { - return false - } - - return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry) -} - -// Delete deletes the metric where the variable labels are the same as those -// passed in as labels. It returns true if a metric was deleted. -// -// It is not an error if the number and names of the Labels are inconsistent -// with those of the VariableLabels in Desc. However, such inconsistent Labels -// can never match an actual metric, so the method will always return false in -// that case. -// -// This method is used for the same purpose as DeleteLabelValues(...string). See -// there for pros and cons of the two methods. -func (m *MetricVec) Delete(labels Labels) bool { - h, err := m.hashLabels(labels) - if err != nil { - return false - } - - return m.metricMap.deleteByHashWithLabels(h, labels, m.curry) -} - -// DeletePartialMatch deletes all metrics where the variable labels contain all of those -// passed in as labels. The order of the labels does not matter. -// It returns the number of metrics deleted. -// -// Note that curried labels will never be matched if deleting from the curried vector. -// To match curried labels with DeletePartialMatch, it must be called on the base vector. -func (m *MetricVec) DeletePartialMatch(labels Labels) int { - return m.metricMap.deleteByLabels(labels, m.curry) -} - -// Without explicit forwarding of Describe, Collect, Reset, those methods won't -// show up in GoDoc. - -// Describe implements Collector. -func (m *MetricVec) Describe(ch chan<- *Desc) { m.metricMap.Describe(ch) } - -// Collect implements Collector. -func (m *MetricVec) Collect(ch chan<- Metric) { m.metricMap.Collect(ch) } - -// Reset deletes all metrics in this vector. -func (m *MetricVec) Reset() { m.metricMap.Reset() } - -// CurryWith returns a vector curried with the provided labels, i.e. the -// returned vector has those labels pre-set for all labeled operations performed -// on it. The cardinality of the curried vector is reduced accordingly. 
The -// order of the remaining labels stays the same (just with the curried labels -// taken out of the sequence – which is relevant for the -// (GetMetric)WithLabelValues methods). It is possible to curry a curried -// vector, but only with labels not yet used for currying before. -// -// The metrics contained in the MetricVec are shared between the curried and -// uncurried vectors. They are just accessed differently. Curried and uncurried -// vectors behave identically in terms of collection. Only one must be -// registered with a given registry (usually the uncurried version). The Reset -// method deletes all metrics, even if called on a curried vector. -// -// Note that CurryWith is usually not called directly but through a wrapper -// around MetricVec, implementing a vector for a specific Metric -// implementation, for example GaugeVec. -func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) { - var ( - newCurry []curriedLabelValue - oldCurry = m.curry - iCurry int - ) - for i, label := range m.desc.variableLabels { - val, ok := labels[label] - if iCurry < len(oldCurry) && oldCurry[iCurry].index == i { - if ok { - return nil, fmt.Errorf("label name %q is already curried", label) - } - newCurry = append(newCurry, oldCurry[iCurry]) - iCurry++ - } else { - if !ok { - continue // Label stays uncurried. - } - newCurry = append(newCurry, curriedLabelValue{i, val}) - } - } - if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 { - return nil, fmt.Errorf("%d unknown label(s) found during currying", l) - } - - return &MetricVec{ - metricMap: m.metricMap, - curry: newCurry, - hashAdd: m.hashAdd, - hashAddByte: m.hashAddByte, - }, nil -} - -// GetMetricWithLabelValues returns the Metric for the given slice of label -// values (same order as the variable labels in Desc). If that combination of -// label values is accessed for the first time, a new Metric is created (by -// calling the newMetric function provided during construction of the -// MetricVec). -// -// It is possible to call this method without using the returned Metric to only -// create the new Metric but leave it in its initial state. -// -// Keeping the Metric for later use is possible (and should be considered if -// performance is critical), but keep in mind that Reset, DeleteLabelValues and -// Delete can be used to delete the Metric from the MetricVec. In that case, the -// Metric will still exist, but it will not be exported anymore, even if a -// Metric with the same label values is created later. -// -// An error is returned if the number of label values is not the same as the -// number of variable labels in Desc (minus any curried labels). -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as -// an alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// -// Note that GetMetricWithLabelValues is usually not called directly but through -// a wrapper around MetricVec, implementing a vector for a specific Metric -// implementation, for example GaugeVec. 
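As the doc above notes, wrappers such as GaugeVec forward to this method; a hedged sketch of the error-returning form versus the panicking shortcut, with invented metric and label names.

package main

import (
	"log"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Metric and label names are invented for this sketch.
	temps := prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Name: "example_temperature_celsius",
		Help: "Temperature partitioned by sensor.",
	}, []string{"sensor"})
	prometheus.MustRegister(temps)

	// The error-returning form wraps the MetricVec method documented above;
	// WithLabelValues is the panicking shortcut.
	g, err := temps.GetMetricWithLabelValues("intake")
	if err != nil {
		log.Fatal(err) // e.g. a wrong number of label values
	}
	g.Set(21.5)
}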
-func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { - h, err := m.hashLabelValues(lvs) - if err != nil { - return nil, err - } - - return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil -} - -// GetMetricWith returns the Metric for the given Labels map (the label names -// must match those of the variable labels in Desc). If that label map is -// accessed for the first time, a new Metric is created. Implications of -// creating a Metric without using it and keeping the Metric for later use -// are the same as for GetMetricWithLabelValues. -// -// An error is returned if the number and names of the Labels are inconsistent -// with those of the variable labels in Desc (minus any curried labels). -// -// This method is used for the same purpose as -// GetMetricWithLabelValues(...string). See there for pros and cons of the two -// methods. -// -// Note that GetMetricWith is usually not called directly but through a wrapper -// around MetricVec, implementing a vector for a specific Metric implementation, -// for example GaugeVec. -func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { - h, err := m.hashLabels(labels) - if err != nil { - return nil, err - } - - return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil -} - -func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { - if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil { - return 0, err - } - - var ( - h = hashNew() - curry = m.curry - iVals, iCurry int - ) - for i := 0; i < len(m.desc.variableLabels); i++ { - if iCurry < len(curry) && curry[iCurry].index == i { - h = m.hashAdd(h, curry[iCurry].value) - iCurry++ - } else { - h = m.hashAdd(h, vals[iVals]) - iVals++ - } - h = m.hashAddByte(h, model.SeparatorByte) - } - return h, nil -} - -func (m *MetricVec) hashLabels(labels Labels) (uint64, error) { - if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil { - return 0, err - } - - var ( - h = hashNew() - curry = m.curry - iCurry int - ) - for i, label := range m.desc.variableLabels { - val, ok := labels[label] - if iCurry < len(curry) && curry[iCurry].index == i { - if ok { - return 0, fmt.Errorf("label name %q is already curried", label) - } - h = m.hashAdd(h, curry[iCurry].value) - iCurry++ - } else { - if !ok { - return 0, fmt.Errorf("label name %q missing in label map", label) - } - h = m.hashAdd(h, val) - } - h = m.hashAddByte(h, model.SeparatorByte) - } - return h, nil -} - -// metricWithLabelValues provides the metric and its label values for -// disambiguation on hash collision. -type metricWithLabelValues struct { - values []string - metric Metric -} - -// curriedLabelValue sets the curried value for a label at the given index. -type curriedLabelValue struct { - index int - value string -} - -// metricMap is a helper for metricVec and shared between differently curried -// metricVecs. -type metricMap struct { - mtx sync.RWMutex // Protects metrics. - metrics map[uint64][]metricWithLabelValues - desc *Desc - newMetric func(labelValues ...string) Metric -} - -// Describe implements Collector. It will send exactly one Desc to the provided -// channel. -func (m *metricMap) Describe(ch chan<- *Desc) { - ch <- m.desc -} - -// Collect implements Collector. 
-func (m *metricMap) Collect(ch chan<- Metric) { - m.mtx.RLock() - defer m.mtx.RUnlock() - - for _, metrics := range m.metrics { - for _, metric := range metrics { - ch <- metric.metric - } - } -} - -// Reset deletes all metrics in this vector. -func (m *metricMap) Reset() { - m.mtx.Lock() - defer m.mtx.Unlock() - - for h := range m.metrics { - delete(m.metrics, h) - } -} - -// deleteByHashWithLabelValues removes the metric from the hash bucket h. If -// there are multiple matches in the bucket, use lvs to select a metric and -// remove only that metric. -func (m *metricMap) deleteByHashWithLabelValues( - h uint64, lvs []string, curry []curriedLabelValue, -) bool { - m.mtx.Lock() - defer m.mtx.Unlock() - - metrics, ok := m.metrics[h] - if !ok { - return false - } - - i := findMetricWithLabelValues(metrics, lvs, curry) - if i >= len(metrics) { - return false - } - - if len(metrics) > 1 { - old := metrics - m.metrics[h] = append(metrics[:i], metrics[i+1:]...) - old[len(old)-1] = metricWithLabelValues{} - } else { - delete(m.metrics, h) - } - return true -} - -// deleteByHashWithLabels removes the metric from the hash bucket h. If there -// are multiple matches in the bucket, use lvs to select a metric and remove -// only that metric. -func (m *metricMap) deleteByHashWithLabels( - h uint64, labels Labels, curry []curriedLabelValue, -) bool { - m.mtx.Lock() - defer m.mtx.Unlock() - - metrics, ok := m.metrics[h] - if !ok { - return false - } - i := findMetricWithLabels(m.desc, metrics, labels, curry) - if i >= len(metrics) { - return false - } - - if len(metrics) > 1 { - old := metrics - m.metrics[h] = append(metrics[:i], metrics[i+1:]...) - old[len(old)-1] = metricWithLabelValues{} - } else { - delete(m.metrics, h) - } - return true -} - -// deleteByLabels deletes a metric if the given labels are present in the metric. -func (m *metricMap) deleteByLabels(labels Labels, curry []curriedLabelValue) int { - m.mtx.Lock() - defer m.mtx.Unlock() - - var numDeleted int - - for h, metrics := range m.metrics { - i := findMetricWithPartialLabels(m.desc, metrics, labels, curry) - if i >= len(metrics) { - // Didn't find matching labels in this metric slice. - continue - } - delete(m.metrics, h) - numDeleted++ - } - - return numDeleted -} - -// findMetricWithPartialLabel returns the index of the matching metric or -// len(metrics) if not found. -func findMetricWithPartialLabels( - desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue, -) int { - for i, metric := range metrics { - if matchPartialLabels(desc, metric.values, labels, curry) { - return i - } - } - return len(metrics) -} - -// indexOf searches the given slice of strings for the target string and returns -// the index or len(items) as well as a boolean whether the search succeeded. -func indexOf(target string, items []string) (int, bool) { - for i, l := range items { - if l == target { - return i, true - } - } - return len(items), false -} - -// valueMatchesVariableOrCurriedValue determines if a value was previously curried, -// and returns whether it matches either the "base" value or the curried value accordingly. -// It also indicates whether the match is against a curried or uncurried value. -func valueMatchesVariableOrCurriedValue(targetValue string, index int, values []string, curry []curriedLabelValue) (bool, bool) { - for _, curriedValue := range curry { - if curriedValue.index == index { - // This label was curried. See if the curried value matches our target. 
- return curriedValue.value == targetValue, true - } - } - // This label was not curried. See if the current value matches our target label. - return values[index] == targetValue, false -} - -// matchPartialLabels searches the current metric and returns whether all of the target label:value pairs are present. -func matchPartialLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool { - for l, v := range labels { - // Check if the target label exists in our metrics and get the index. - varLabelIndex, validLabel := indexOf(l, desc.variableLabels) - if validLabel { - // Check the value of that label against the target value. - // We don't consider curried values in partial matches. - matches, curried := valueMatchesVariableOrCurriedValue(v, varLabelIndex, values, curry) - if matches && !curried { - continue - } - } - return false - } - return true -} - -// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value -// or creates it and returns the new one. -// -// This function holds the mutex. -func (m *metricMap) getOrCreateMetricWithLabelValues( - hash uint64, lvs []string, curry []curriedLabelValue, -) Metric { - m.mtx.RLock() - metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs, curry) - m.mtx.RUnlock() - if ok { - return metric - } - - m.mtx.Lock() - defer m.mtx.Unlock() - metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs, curry) - if !ok { - inlinedLVs := inlineLabelValues(lvs, curry) - metric = m.newMetric(inlinedLVs...) - m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: inlinedLVs, metric: metric}) - } - return metric -} - -// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value -// or creates it and returns the new one. -// -// This function holds the mutex. -func (m *metricMap) getOrCreateMetricWithLabels( - hash uint64, labels Labels, curry []curriedLabelValue, -) Metric { - m.mtx.RLock() - metric, ok := m.getMetricWithHashAndLabels(hash, labels, curry) - m.mtx.RUnlock() - if ok { - return metric - } - - m.mtx.Lock() - defer m.mtx.Unlock() - metric, ok = m.getMetricWithHashAndLabels(hash, labels, curry) - if !ok { - lvs := extractLabelValues(m.desc, labels, curry) - metric = m.newMetric(lvs...) - m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: lvs, metric: metric}) - } - return metric -} - -// getMetricWithHashAndLabelValues gets a metric while handling possible -// collisions in the hash space. Must be called while holding the read mutex. -func (m *metricMap) getMetricWithHashAndLabelValues( - h uint64, lvs []string, curry []curriedLabelValue, -) (Metric, bool) { - metrics, ok := m.metrics[h] - if ok { - if i := findMetricWithLabelValues(metrics, lvs, curry); i < len(metrics) { - return metrics[i].metric, true - } - } - return nil, false -} - -// getMetricWithHashAndLabels gets a metric while handling possible collisions in -// the hash space. Must be called while holding read mutex. -func (m *metricMap) getMetricWithHashAndLabels( - h uint64, labels Labels, curry []curriedLabelValue, -) (Metric, bool) { - metrics, ok := m.metrics[h] - if ok { - if i := findMetricWithLabels(m.desc, metrics, labels, curry); i < len(metrics) { - return metrics[i].metric, true - } - } - return nil, false -} - -// findMetricWithLabelValues returns the index of the matching metric or -// len(metrics) if not found. 
-func findMetricWithLabelValues( - metrics []metricWithLabelValues, lvs []string, curry []curriedLabelValue, -) int { - for i, metric := range metrics { - if matchLabelValues(metric.values, lvs, curry) { - return i - } - } - return len(metrics) -} - -// findMetricWithLabels returns the index of the matching metric or len(metrics) -// if not found. -func findMetricWithLabels( - desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue, -) int { - for i, metric := range metrics { - if matchLabels(desc, metric.values, labels, curry) { - return i - } - } - return len(metrics) -} - -func matchLabelValues(values, lvs []string, curry []curriedLabelValue) bool { - if len(values) != len(lvs)+len(curry) { - return false - } - var iLVs, iCurry int - for i, v := range values { - if iCurry < len(curry) && curry[iCurry].index == i { - if v != curry[iCurry].value { - return false - } - iCurry++ - continue - } - if v != lvs[iLVs] { - return false - } - iLVs++ - } - return true -} - -func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool { - if len(values) != len(labels)+len(curry) { - return false - } - iCurry := 0 - for i, k := range desc.variableLabels { - if iCurry < len(curry) && curry[iCurry].index == i { - if values[i] != curry[iCurry].value { - return false - } - iCurry++ - continue - } - if values[i] != labels[k] { - return false - } - } - return true -} - -func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string { - labelValues := make([]string, len(labels)+len(curry)) - iCurry := 0 - for i, k := range desc.variableLabels { - if iCurry < len(curry) && curry[iCurry].index == i { - labelValues[i] = curry[iCurry].value - iCurry++ - continue - } - labelValues[i] = labels[k] - } - return labelValues -} - -func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string { - labelValues := make([]string, len(lvs)+len(curry)) - var iCurry, iLVs int - for i := range labelValues { - if iCurry < len(curry) && curry[iCurry].index == i { - labelValues[i] = curry[iCurry].value - iCurry++ - continue - } - labelValues[i] = lvs[iLVs] - iLVs++ - } - return labelValues -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go deleted file mode 100644 index 1498ee144..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go +++ /dev/null @@ -1,216 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "fmt" - "sort" - - //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/golang/protobuf/proto" - - dto "github.com/prometheus/client_model/go" - - "github.com/prometheus/client_golang/prometheus/internal" -) - -// WrapRegistererWith returns a Registerer wrapping the provided -// Registerer. 
Collectors registered with the returned Registerer will be -// registered with the wrapped Registerer in a modified way. The modified -// Collector adds the provided Labels to all Metrics it collects (as -// ConstLabels). The Metrics collected by the unmodified Collector must not -// duplicate any of those labels. Wrapping a nil value is valid, resulting -// in a no-op Registerer. -// -// WrapRegistererWith provides a way to add fixed labels to a subset of -// Collectors. It should not be used to add fixed labels to all metrics -// exposed. See also -// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels -// -// Conflicts between Collectors registered through the original Registerer with -// Collectors registered through the wrapping Registerer will still be -// detected. Any AlreadyRegisteredError returned by the Register method of -// either Registerer will contain the ExistingCollector in the form it was -// provided to the respective registry. -// -// The Collector example demonstrates a use of WrapRegistererWith. -func WrapRegistererWith(labels Labels, reg Registerer) Registerer { - return &wrappingRegisterer{ - wrappedRegisterer: reg, - labels: labels, - } -} - -// WrapRegistererWithPrefix returns a Registerer wrapping the provided -// Registerer. Collectors registered with the returned Registerer will be -// registered with the wrapped Registerer in a modified way. The modified -// Collector adds the provided prefix to the name of all Metrics it collects. -// Wrapping a nil value is valid, resulting in a no-op Registerer. -// -// WrapRegistererWithPrefix is useful to have one place to prefix all metrics of -// a sub-system. To make this work, register metrics of the sub-system with the -// wrapping Registerer returned by WrapRegistererWithPrefix. It is rarely useful -// to use the same prefix for all metrics exposed. In particular, do not prefix -// metric names that are standardized across applications, as that would break -// horizontal monitoring, for example the metrics provided by the Go collector -// (see NewGoCollector) and the process collector (see NewProcessCollector). (In -// fact, those metrics are already prefixed with “go_” or “process_”, -// respectively.) -// -// Conflicts between Collectors registered through the original Registerer with -// Collectors registered through the wrapping Registerer will still be -// detected. Any AlreadyRegisteredError returned by the Register method of -// either Registerer will contain the ExistingCollector in the form it was -// provided to the respective registry. 
-func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer { - return &wrappingRegisterer{ - wrappedRegisterer: reg, - prefix: prefix, - } -} - -type wrappingRegisterer struct { - wrappedRegisterer Registerer - prefix string - labels Labels -} - -func (r *wrappingRegisterer) Register(c Collector) error { - if r.wrappedRegisterer == nil { - return nil - } - return r.wrappedRegisterer.Register(&wrappingCollector{ - wrappedCollector: c, - prefix: r.prefix, - labels: r.labels, - }) -} - -func (r *wrappingRegisterer) MustRegister(cs ...Collector) { - if r.wrappedRegisterer == nil { - return - } - for _, c := range cs { - if err := r.Register(c); err != nil { - panic(err) - } - } -} - -func (r *wrappingRegisterer) Unregister(c Collector) bool { - if r.wrappedRegisterer == nil { - return false - } - return r.wrappedRegisterer.Unregister(&wrappingCollector{ - wrappedCollector: c, - prefix: r.prefix, - labels: r.labels, - }) -} - -type wrappingCollector struct { - wrappedCollector Collector - prefix string - labels Labels -} - -func (c *wrappingCollector) Collect(ch chan<- Metric) { - wrappedCh := make(chan Metric) - go func() { - c.wrappedCollector.Collect(wrappedCh) - close(wrappedCh) - }() - for m := range wrappedCh { - ch <- &wrappingMetric{ - wrappedMetric: m, - prefix: c.prefix, - labels: c.labels, - } - } -} - -func (c *wrappingCollector) Describe(ch chan<- *Desc) { - wrappedCh := make(chan *Desc) - go func() { - c.wrappedCollector.Describe(wrappedCh) - close(wrappedCh) - }() - for desc := range wrappedCh { - ch <- wrapDesc(desc, c.prefix, c.labels) - } -} - -func (c *wrappingCollector) unwrapRecursively() Collector { - switch wc := c.wrappedCollector.(type) { - case *wrappingCollector: - return wc.unwrapRecursively() - default: - return wc - } -} - -type wrappingMetric struct { - wrappedMetric Metric - prefix string - labels Labels -} - -func (m *wrappingMetric) Desc() *Desc { - return wrapDesc(m.wrappedMetric.Desc(), m.prefix, m.labels) -} - -func (m *wrappingMetric) Write(out *dto.Metric) error { - if err := m.wrappedMetric.Write(out); err != nil { - return err - } - if len(m.labels) == 0 { - // No wrapping labels. - return nil - } - for ln, lv := range m.labels { - out.Label = append(out.Label, &dto.LabelPair{ - Name: proto.String(ln), - Value: proto.String(lv), - }) - } - sort.Sort(internal.LabelPairSorter(out.Label)) - return nil -} - -func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc { - constLabels := Labels{} - for _, lp := range desc.constLabelPairs { - constLabels[*lp.Name] = *lp.Value - } - for ln, lv := range labels { - if _, alreadyUsed := constLabels[ln]; alreadyUsed { - return &Desc{ - fqName: desc.fqName, - help: desc.help, - variableLabels: desc.variableLabels, - constLabelPairs: desc.constLabelPairs, - err: fmt.Errorf("attempted wrapping with already existing label name %q", ln), - } - } - constLabels[ln] = lv - } - // NewDesc will do remaining validations. - newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels) - // Propagate errors if there was any. This will override any errer - // created by NewDesc above, i.e. earlier errors get precedence. 
- if desc.err != nil { - newDesc.err = desc.err - } - return newDesc -} diff --git a/vendor/github.com/prometheus/client_model/LICENSE b/vendor/github.com/prometheus/client_model/LICENSE deleted file mode 100644 index 261eeb9e9..000000000 --- a/vendor/github.com/prometheus/client_model/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. 
We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/prometheus/client_model/NOTICE b/vendor/github.com/prometheus/client_model/NOTICE deleted file mode 100644 index 20110e410..000000000 --- a/vendor/github.com/prometheus/client_model/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -Data model artifacts for Prometheus. -Copyright 2012-2015 The Prometheus Authors - -This product includes software developed at -SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go deleted file mode 100644 index 35904ea19..000000000 --- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go +++ /dev/null @@ -1,914 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: io/prometheus/client/metrics.proto - -package io_prometheus_client - -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - timestamp "github.com/golang/protobuf/ptypes/timestamp" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type MetricType int32 - -const ( - // COUNTER must use the Metric field "counter". - MetricType_COUNTER MetricType = 0 - // GAUGE must use the Metric field "gauge". - MetricType_GAUGE MetricType = 1 - // SUMMARY must use the Metric field "summary". - MetricType_SUMMARY MetricType = 2 - // UNTYPED must use the Metric field "untyped". - MetricType_UNTYPED MetricType = 3 - // HISTOGRAM must use the Metric field "histogram". - MetricType_HISTOGRAM MetricType = 4 - // GAUGE_HISTOGRAM must use the Metric field "histogram". 
- MetricType_GAUGE_HISTOGRAM MetricType = 5 -) - -var MetricType_name = map[int32]string{ - 0: "COUNTER", - 1: "GAUGE", - 2: "SUMMARY", - 3: "UNTYPED", - 4: "HISTOGRAM", - 5: "GAUGE_HISTOGRAM", -} - -var MetricType_value = map[string]int32{ - "COUNTER": 0, - "GAUGE": 1, - "SUMMARY": 2, - "UNTYPED": 3, - "HISTOGRAM": 4, - "GAUGE_HISTOGRAM": 5, -} - -func (x MetricType) Enum() *MetricType { - p := new(MetricType) - *p = x - return p -} - -func (x MetricType) String() string { - return proto.EnumName(MetricType_name, int32(x)) -} - -func (x *MetricType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType") - if err != nil { - return err - } - *x = MetricType(value) - return nil -} - -func (MetricType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{0} -} - -type LabelPair struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LabelPair) Reset() { *m = LabelPair{} } -func (m *LabelPair) String() string { return proto.CompactTextString(m) } -func (*LabelPair) ProtoMessage() {} -func (*LabelPair) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{0} -} - -func (m *LabelPair) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LabelPair.Unmarshal(m, b) -} -func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic) -} -func (m *LabelPair) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelPair.Merge(m, src) -} -func (m *LabelPair) XXX_Size() int { - return xxx_messageInfo_LabelPair.Size(m) -} -func (m *LabelPair) XXX_DiscardUnknown() { - xxx_messageInfo_LabelPair.DiscardUnknown(m) -} - -var xxx_messageInfo_LabelPair proto.InternalMessageInfo - -func (m *LabelPair) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *LabelPair) GetValue() string { - if m != nil && m.Value != nil { - return *m.Value - } - return "" -} - -type Gauge struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Gauge) Reset() { *m = Gauge{} } -func (m *Gauge) String() string { return proto.CompactTextString(m) } -func (*Gauge) ProtoMessage() {} -func (*Gauge) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{1} -} - -func (m *Gauge) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Gauge.Unmarshal(m, b) -} -func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Gauge.Marshal(b, m, deterministic) -} -func (m *Gauge) XXX_Merge(src proto.Message) { - xxx_messageInfo_Gauge.Merge(m, src) -} -func (m *Gauge) XXX_Size() int { - return xxx_messageInfo_Gauge.Size(m) -} -func (m *Gauge) XXX_DiscardUnknown() { - xxx_messageInfo_Gauge.DiscardUnknown(m) -} - -var xxx_messageInfo_Gauge proto.InternalMessageInfo - -func (m *Gauge) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type Counter struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"` - 
XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Counter) Reset() { *m = Counter{} } -func (m *Counter) String() string { return proto.CompactTextString(m) } -func (*Counter) ProtoMessage() {} -func (*Counter) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{2} -} - -func (m *Counter) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Counter.Unmarshal(m, b) -} -func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Counter.Marshal(b, m, deterministic) -} -func (m *Counter) XXX_Merge(src proto.Message) { - xxx_messageInfo_Counter.Merge(m, src) -} -func (m *Counter) XXX_Size() int { - return xxx_messageInfo_Counter.Size(m) -} -func (m *Counter) XXX_DiscardUnknown() { - xxx_messageInfo_Counter.DiscardUnknown(m) -} - -var xxx_messageInfo_Counter proto.InternalMessageInfo - -func (m *Counter) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -func (m *Counter) GetExemplar() *Exemplar { - if m != nil { - return m.Exemplar - } - return nil -} - -type Quantile struct { - Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` - Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Quantile) Reset() { *m = Quantile{} } -func (m *Quantile) String() string { return proto.CompactTextString(m) } -func (*Quantile) ProtoMessage() {} -func (*Quantile) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{3} -} - -func (m *Quantile) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Quantile.Unmarshal(m, b) -} -func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Quantile.Marshal(b, m, deterministic) -} -func (m *Quantile) XXX_Merge(src proto.Message) { - xxx_messageInfo_Quantile.Merge(m, src) -} -func (m *Quantile) XXX_Size() int { - return xxx_messageInfo_Quantile.Size(m) -} -func (m *Quantile) XXX_DiscardUnknown() { - xxx_messageInfo_Quantile.DiscardUnknown(m) -} - -var xxx_messageInfo_Quantile proto.InternalMessageInfo - -func (m *Quantile) GetQuantile() float64 { - if m != nil && m.Quantile != nil { - return *m.Quantile - } - return 0 -} - -func (m *Quantile) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type Summary struct { - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` - Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Summary) Reset() { *m = Summary{} } -func (m *Summary) String() string { return proto.CompactTextString(m) } -func (*Summary) ProtoMessage() {} -func (*Summary) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{4} -} - -func (m *Summary) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Summary.Unmarshal(m, b) -} -func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Summary.Marshal(b, m, deterministic) -} -func (m *Summary) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_Summary.Merge(m, src) -} -func (m *Summary) XXX_Size() int { - return xxx_messageInfo_Summary.Size(m) -} -func (m *Summary) XXX_DiscardUnknown() { - xxx_messageInfo_Summary.DiscardUnknown(m) -} - -var xxx_messageInfo_Summary proto.InternalMessageInfo - -func (m *Summary) GetSampleCount() uint64 { - if m != nil && m.SampleCount != nil { - return *m.SampleCount - } - return 0 -} - -func (m *Summary) GetSampleSum() float64 { - if m != nil && m.SampleSum != nil { - return *m.SampleSum - } - return 0 -} - -func (m *Summary) GetQuantile() []*Quantile { - if m != nil { - return m.Quantile - } - return nil -} - -type Untyped struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Untyped) Reset() { *m = Untyped{} } -func (m *Untyped) String() string { return proto.CompactTextString(m) } -func (*Untyped) ProtoMessage() {} -func (*Untyped) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{5} -} - -func (m *Untyped) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Untyped.Unmarshal(m, b) -} -func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Untyped.Marshal(b, m, deterministic) -} -func (m *Untyped) XXX_Merge(src proto.Message) { - xxx_messageInfo_Untyped.Merge(m, src) -} -func (m *Untyped) XXX_Size() int { - return xxx_messageInfo_Untyped.Size(m) -} -func (m *Untyped) XXX_DiscardUnknown() { - xxx_messageInfo_Untyped.DiscardUnknown(m) -} - -var xxx_messageInfo_Untyped proto.InternalMessageInfo - -func (m *Untyped) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type Histogram struct { - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` - SampleCountFloat *float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"` - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` - // Buckets for the conventional histogram. - Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` - // schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8. - // They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and - // then each power of two is divided into 2^n logarithmic buckets. - // Or in other words, each bucket boundary is the previous boundary times 2^(2^-n). - // In the future, more bucket schemas may be added using numbers < -4 or > 8. - Schema *int32 `protobuf:"zigzag32,5,opt,name=schema" json:"schema,omitempty"` - ZeroThreshold *float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold" json:"zero_threshold,omitempty"` - ZeroCount *uint64 `protobuf:"varint,7,opt,name=zero_count,json=zeroCount" json:"zero_count,omitempty"` - ZeroCountFloat *float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat" json:"zero_count_float,omitempty"` - // Negative buckets for the native histogram. - NegativeSpan []*BucketSpan `protobuf:"bytes,9,rep,name=negative_span,json=negativeSpan" json:"negative_span,omitempty"` - // Use either "negative_delta" or "negative_count", the former for - // regular histograms with integer counts, the latter for float - // histograms. 
- NegativeDelta []int64 `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"` - NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"` - // Positive buckets for the native histogram. - PositiveSpan []*BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan" json:"positive_span,omitempty"` - // Use either "positive_delta" or "positive_count", the former for - // regular histograms with integer counts, the latter for float - // histograms. - PositiveDelta []int64 `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"` - PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Histogram) Reset() { *m = Histogram{} } -func (m *Histogram) String() string { return proto.CompactTextString(m) } -func (*Histogram) ProtoMessage() {} -func (*Histogram) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{6} -} - -func (m *Histogram) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Histogram.Unmarshal(m, b) -} -func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) -} -func (m *Histogram) XXX_Merge(src proto.Message) { - xxx_messageInfo_Histogram.Merge(m, src) -} -func (m *Histogram) XXX_Size() int { - return xxx_messageInfo_Histogram.Size(m) -} -func (m *Histogram) XXX_DiscardUnknown() { - xxx_messageInfo_Histogram.DiscardUnknown(m) -} - -var xxx_messageInfo_Histogram proto.InternalMessageInfo - -func (m *Histogram) GetSampleCount() uint64 { - if m != nil && m.SampleCount != nil { - return *m.SampleCount - } - return 0 -} - -func (m *Histogram) GetSampleCountFloat() float64 { - if m != nil && m.SampleCountFloat != nil { - return *m.SampleCountFloat - } - return 0 -} - -func (m *Histogram) GetSampleSum() float64 { - if m != nil && m.SampleSum != nil { - return *m.SampleSum - } - return 0 -} - -func (m *Histogram) GetBucket() []*Bucket { - if m != nil { - return m.Bucket - } - return nil -} - -func (m *Histogram) GetSchema() int32 { - if m != nil && m.Schema != nil { - return *m.Schema - } - return 0 -} - -func (m *Histogram) GetZeroThreshold() float64 { - if m != nil && m.ZeroThreshold != nil { - return *m.ZeroThreshold - } - return 0 -} - -func (m *Histogram) GetZeroCount() uint64 { - if m != nil && m.ZeroCount != nil { - return *m.ZeroCount - } - return 0 -} - -func (m *Histogram) GetZeroCountFloat() float64 { - if m != nil && m.ZeroCountFloat != nil { - return *m.ZeroCountFloat - } - return 0 -} - -func (m *Histogram) GetNegativeSpan() []*BucketSpan { - if m != nil { - return m.NegativeSpan - } - return nil -} - -func (m *Histogram) GetNegativeDelta() []int64 { - if m != nil { - return m.NegativeDelta - } - return nil -} - -func (m *Histogram) GetNegativeCount() []float64 { - if m != nil { - return m.NegativeCount - } - return nil -} - -func (m *Histogram) GetPositiveSpan() []*BucketSpan { - if m != nil { - return m.PositiveSpan - } - return nil -} - -func (m *Histogram) GetPositiveDelta() []int64 { - if m != nil { - return m.PositiveDelta - } - return nil -} - -func (m *Histogram) GetPositiveCount() []float64 { - if m != nil { - return m.PositiveCount - } - return nil -} - -// A Bucket of a conventional histogram, 
each of which is treated as -// an individual counter-like time series by Prometheus. -type Bucket struct { - CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` - CumulativeCountFloat *float64 `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat" json:"cumulative_count_float,omitempty"` - UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` - Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Bucket) Reset() { *m = Bucket{} } -func (m *Bucket) String() string { return proto.CompactTextString(m) } -func (*Bucket) ProtoMessage() {} -func (*Bucket) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{7} -} - -func (m *Bucket) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Bucket.Unmarshal(m, b) -} -func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Bucket.Marshal(b, m, deterministic) -} -func (m *Bucket) XXX_Merge(src proto.Message) { - xxx_messageInfo_Bucket.Merge(m, src) -} -func (m *Bucket) XXX_Size() int { - return xxx_messageInfo_Bucket.Size(m) -} -func (m *Bucket) XXX_DiscardUnknown() { - xxx_messageInfo_Bucket.DiscardUnknown(m) -} - -var xxx_messageInfo_Bucket proto.InternalMessageInfo - -func (m *Bucket) GetCumulativeCount() uint64 { - if m != nil && m.CumulativeCount != nil { - return *m.CumulativeCount - } - return 0 -} - -func (m *Bucket) GetCumulativeCountFloat() float64 { - if m != nil && m.CumulativeCountFloat != nil { - return *m.CumulativeCountFloat - } - return 0 -} - -func (m *Bucket) GetUpperBound() float64 { - if m != nil && m.UpperBound != nil { - return *m.UpperBound - } - return 0 -} - -func (m *Bucket) GetExemplar() *Exemplar { - if m != nil { - return m.Exemplar - } - return nil -} - -// A BucketSpan defines a number of consecutive buckets in a native -// histogram with their offset. Logically, it would be more -// straightforward to include the bucket counts in the Span. However, -// the protobuf representation is more compact in the way the data is -// structured here (with all the buckets in a single array separate -// from the Spans). 
-type BucketSpan struct { - Offset *int32 `protobuf:"zigzag32,1,opt,name=offset" json:"offset,omitempty"` - Length *uint32 `protobuf:"varint,2,opt,name=length" json:"length,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BucketSpan) Reset() { *m = BucketSpan{} } -func (m *BucketSpan) String() string { return proto.CompactTextString(m) } -func (*BucketSpan) ProtoMessage() {} -func (*BucketSpan) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{8} -} - -func (m *BucketSpan) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BucketSpan.Unmarshal(m, b) -} -func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BucketSpan.Marshal(b, m, deterministic) -} -func (m *BucketSpan) XXX_Merge(src proto.Message) { - xxx_messageInfo_BucketSpan.Merge(m, src) -} -func (m *BucketSpan) XXX_Size() int { - return xxx_messageInfo_BucketSpan.Size(m) -} -func (m *BucketSpan) XXX_DiscardUnknown() { - xxx_messageInfo_BucketSpan.DiscardUnknown(m) -} - -var xxx_messageInfo_BucketSpan proto.InternalMessageInfo - -func (m *BucketSpan) GetOffset() int32 { - if m != nil && m.Offset != nil { - return *m.Offset - } - return 0 -} - -func (m *BucketSpan) GetLength() uint32 { - if m != nil && m.Length != nil { - return *m.Length - } - return 0 -} - -type Exemplar struct { - Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` - Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` - Timestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Exemplar) Reset() { *m = Exemplar{} } -func (m *Exemplar) String() string { return proto.CompactTextString(m) } -func (*Exemplar) ProtoMessage() {} -func (*Exemplar) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{9} -} - -func (m *Exemplar) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Exemplar.Unmarshal(m, b) -} -func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic) -} -func (m *Exemplar) XXX_Merge(src proto.Message) { - xxx_messageInfo_Exemplar.Merge(m, src) -} -func (m *Exemplar) XXX_Size() int { - return xxx_messageInfo_Exemplar.Size(m) -} -func (m *Exemplar) XXX_DiscardUnknown() { - xxx_messageInfo_Exemplar.DiscardUnknown(m) -} - -var xxx_messageInfo_Exemplar proto.InternalMessageInfo - -func (m *Exemplar) GetLabel() []*LabelPair { - if m != nil { - return m.Label - } - return nil -} - -func (m *Exemplar) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -func (m *Exemplar) GetTimestamp() *timestamp.Timestamp { - if m != nil { - return m.Timestamp - } - return nil -} - -type Metric struct { - Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` - Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` - Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` - Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` - Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` - Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` - TimestampMs *int64 
`protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Metric) Reset() { *m = Metric{} } -func (m *Metric) String() string { return proto.CompactTextString(m) } -func (*Metric) ProtoMessage() {} -func (*Metric) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{10} -} - -func (m *Metric) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Metric.Unmarshal(m, b) -} -func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Metric.Marshal(b, m, deterministic) -} -func (m *Metric) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metric.Merge(m, src) -} -func (m *Metric) XXX_Size() int { - return xxx_messageInfo_Metric.Size(m) -} -func (m *Metric) XXX_DiscardUnknown() { - xxx_messageInfo_Metric.DiscardUnknown(m) -} - -var xxx_messageInfo_Metric proto.InternalMessageInfo - -func (m *Metric) GetLabel() []*LabelPair { - if m != nil { - return m.Label - } - return nil -} - -func (m *Metric) GetGauge() *Gauge { - if m != nil { - return m.Gauge - } - return nil -} - -func (m *Metric) GetCounter() *Counter { - if m != nil { - return m.Counter - } - return nil -} - -func (m *Metric) GetSummary() *Summary { - if m != nil { - return m.Summary - } - return nil -} - -func (m *Metric) GetUntyped() *Untyped { - if m != nil { - return m.Untyped - } - return nil -} - -func (m *Metric) GetHistogram() *Histogram { - if m != nil { - return m.Histogram - } - return nil -} - -func (m *Metric) GetTimestampMs() int64 { - if m != nil && m.TimestampMs != nil { - return *m.TimestampMs - } - return 0 -} - -type MetricFamily struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` - Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` - Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MetricFamily) Reset() { *m = MetricFamily{} } -func (m *MetricFamily) String() string { return proto.CompactTextString(m) } -func (*MetricFamily) ProtoMessage() {} -func (*MetricFamily) Descriptor() ([]byte, []int) { - return fileDescriptor_d1e5ddb18987a258, []int{11} -} - -func (m *MetricFamily) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MetricFamily.Unmarshal(m, b) -} -func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic) -} -func (m *MetricFamily) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricFamily.Merge(m, src) -} -func (m *MetricFamily) XXX_Size() int { - return xxx_messageInfo_MetricFamily.Size(m) -} -func (m *MetricFamily) XXX_DiscardUnknown() { - xxx_messageInfo_MetricFamily.DiscardUnknown(m) -} - -var xxx_messageInfo_MetricFamily proto.InternalMessageInfo - -func (m *MetricFamily) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *MetricFamily) GetHelp() string { - if m != nil && m.Help != nil { - return *m.Help - } - return "" -} - -func (m *MetricFamily) GetType() MetricType { - if m != nil && m.Type != nil { - return *m.Type - } - return MetricType_COUNTER -} - -func (m *MetricFamily) GetMetric() []*Metric { - if m != 
nil { - return m.Metric - } - return nil -} - -func init() { - proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) - proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair") - proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge") - proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter") - proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile") - proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary") - proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped") - proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram") - proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket") - proto.RegisterType((*BucketSpan)(nil), "io.prometheus.client.BucketSpan") - proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar") - proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric") - proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily") -} - -func init() { - proto.RegisterFile("io/prometheus/client/metrics.proto", fileDescriptor_d1e5ddb18987a258) -} - -var fileDescriptor_d1e5ddb18987a258 = []byte{ - // 896 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xdd, 0x8e, 0xdb, 0x44, - 0x18, 0xc5, 0x9b, 0x5f, 0x7f, 0xd9, 0x6c, 0xd3, 0x61, 0x55, 0x59, 0x0b, 0xcb, 0x06, 0x4b, 0x48, - 0x0b, 0x42, 0x8e, 0x40, 0x5b, 0x81, 0x0a, 0x5c, 0xec, 0xb6, 0xe9, 0x16, 0x89, 0xb4, 0x65, 0x92, - 0x5c, 0x14, 0x2e, 0xac, 0x49, 0x32, 0xeb, 0x58, 0x78, 0x3c, 0xc6, 0x1e, 0x57, 0x2c, 0x2f, 0xc0, - 0x35, 0xaf, 0xc0, 0xc3, 0xf0, 0x22, 0x3c, 0x08, 0x68, 0xfe, 0xec, 0xdd, 0xe2, 0x94, 0xd2, 0x3b, - 0x7f, 0x67, 0xce, 0xf7, 0xcd, 0x39, 0xe3, 0xc9, 0x71, 0xc0, 0x8f, 0xf9, 0x24, 0xcb, 0x39, 0xa3, - 0x62, 0x4b, 0xcb, 0x62, 0xb2, 0x4e, 0x62, 0x9a, 0x8a, 0x09, 0xa3, 0x22, 0x8f, 0xd7, 0x45, 0x90, - 0xe5, 0x5c, 0x70, 0x74, 0x18, 0xf3, 0xa0, 0xe6, 0x04, 0x9a, 0x73, 0x74, 0x12, 0x71, 0x1e, 0x25, - 0x74, 0xa2, 0x38, 0xab, 0xf2, 0x6a, 0x22, 0x62, 0x46, 0x0b, 0x41, 0x58, 0xa6, 0xdb, 0xfc, 0xfb, - 0xe0, 0x7e, 0x47, 0x56, 0x34, 0x79, 0x4e, 0xe2, 0x1c, 0x21, 0x68, 0xa7, 0x84, 0x51, 0xcf, 0x19, - 0x3b, 0xa7, 0x2e, 0x56, 0xcf, 0xe8, 0x10, 0x3a, 0x2f, 0x49, 0x52, 0x52, 0x6f, 0x4f, 0x81, 0xba, - 0xf0, 0x8f, 0xa1, 0x73, 0x49, 0xca, 0xe8, 0xc6, 0xb2, 0xec, 0x71, 0xec, 0xf2, 0x8f, 0xd0, 0x7b, - 0xc8, 0xcb, 0x54, 0xd0, 0xbc, 0x99, 0x80, 0x1e, 0x40, 0x9f, 0xfe, 0x42, 0x59, 0x96, 0x90, 0x5c, - 0x0d, 0x1e, 0x7c, 0xfe, 0x41, 0xd0, 0x64, 0x20, 0x98, 0x1a, 0x16, 0xae, 0xf8, 0xfe, 0xd7, 0xd0, - 0xff, 0xbe, 0x24, 0xa9, 0x88, 0x13, 0x8a, 0x8e, 0xa0, 0xff, 0xb3, 0x79, 0x36, 0x1b, 0x54, 0xf5, - 0x6d, 0xe5, 0x95, 0xb4, 0xdf, 0x1c, 0xe8, 0xcd, 0x4b, 0xc6, 0x48, 0x7e, 0x8d, 0x3e, 0x84, 0xfd, - 0x82, 0xb0, 0x2c, 0xa1, 0xe1, 0x5a, 0xaa, 0x55, 0x13, 0xda, 0x78, 0xa0, 0x31, 0x65, 0x00, 0x1d, - 0x03, 0x18, 0x4a, 0x51, 0x32, 0x33, 0xc9, 0xd5, 0xc8, 0xbc, 0x64, 0xd2, 0x47, 0xb5, 0x7f, 0x6b, - 0xdc, 0xda, 0xed, 0xc3, 0x2a, 0xae, 0xf5, 0xf9, 0x27, 0xd0, 0x5b, 0xa6, 0xe2, 0x3a, 0xa3, 0x9b, - 0x1d, 0xa7, 0xf8, 0x57, 0x1b, 0xdc, 0x27, 0x71, 0x21, 0x78, 0x94, 0x13, 0xf6, 0x26, 0x62, 0x3f, - 0x05, 0x74, 0x93, 0x12, 0x5e, 0x25, 0x9c, 0x08, 0xaf, 0xad, 0x66, 0x8e, 0x6e, 0x10, 0x1f, 0x4b, - 0xfc, 0xbf, 0xac, 0x9d, 0x41, 0x77, 0x55, 0xae, 0x7f, 0xa2, 0xc2, 0x18, 0x7b, 0xbf, 0xd9, 0xd8, - 0x85, 0xe2, 0x60, 0xc3, 0x45, 0xf7, 0xa0, 0x5b, 0xac, 0xb7, 0x94, 0x11, 0xaf, 0x33, 0x76, 0x4e, - 0xef, 0x62, 0x53, 0xa1, 0x8f, 0xe0, 0xe0, 0x57, 0x9a, 0xf3, 0x50, 0x6c, 
0x73, 0x5a, 0x6c, 0x79, - 0xb2, 0xf1, 0xba, 0x6a, 0xc3, 0xa1, 0x44, 0x17, 0x16, 0x94, 0x9a, 0x14, 0x4d, 0x5b, 0xec, 0x29, - 0x8b, 0xae, 0x44, 0xb4, 0xc1, 0x53, 0x18, 0xd5, 0xcb, 0xc6, 0x5e, 0x5f, 0xcd, 0x39, 0xa8, 0x48, - 0xda, 0xdc, 0x14, 0x86, 0x29, 0x8d, 0x88, 0x88, 0x5f, 0xd2, 0xb0, 0xc8, 0x48, 0xea, 0xb9, 0xca, - 0xc4, 0xf8, 0x75, 0x26, 0xe6, 0x19, 0x49, 0xf1, 0xbe, 0x6d, 0x93, 0x95, 0x94, 0x5d, 0x8d, 0xd9, - 0xd0, 0x44, 0x10, 0x0f, 0xc6, 0xad, 0x53, 0x84, 0xab, 0xe1, 0x8f, 0x24, 0x78, 0x8b, 0xa6, 0xa5, - 0x0f, 0xc6, 0x2d, 0xe9, 0xce, 0xa2, 0x5a, 0xfe, 0x14, 0x86, 0x19, 0x2f, 0xe2, 0x5a, 0xd4, 0xfe, - 0x9b, 0x8a, 0xb2, 0x6d, 0x56, 0x54, 0x35, 0x46, 0x8b, 0x1a, 0x6a, 0x51, 0x16, 0xad, 0x44, 0x55, - 0x34, 0x2d, 0xea, 0x40, 0x8b, 0xb2, 0xa8, 0x12, 0xe5, 0xff, 0xe9, 0x40, 0x57, 0x6f, 0x85, 0x3e, - 0x86, 0xd1, 0xba, 0x64, 0x65, 0x72, 0xd3, 0x88, 0xbe, 0x66, 0x77, 0x6a, 0x5c, 0x5b, 0x39, 0x83, - 0x7b, 0xaf, 0x52, 0x6f, 0x5d, 0xb7, 0xc3, 0x57, 0x1a, 0xf4, 0x5b, 0x39, 0x81, 0x41, 0x99, 0x65, - 0x34, 0x0f, 0x57, 0xbc, 0x4c, 0x37, 0xe6, 0xce, 0x81, 0x82, 0x2e, 0x24, 0x72, 0x2b, 0x17, 0x5a, - 0xff, 0x3b, 0x17, 0xa0, 0x3e, 0x32, 0x79, 0x11, 0xf9, 0xd5, 0x55, 0x41, 0xb5, 0x83, 0xbb, 0xd8, - 0x54, 0x12, 0x4f, 0x68, 0x1a, 0x89, 0xad, 0xda, 0x7d, 0x88, 0x4d, 0xe5, 0xff, 0xee, 0x40, 0xdf, - 0x0e, 0x45, 0xf7, 0xa1, 0x93, 0xc8, 0x54, 0xf4, 0x1c, 0xf5, 0x82, 0x4e, 0x9a, 0x35, 0x54, 0xc1, - 0x89, 0x35, 0xbb, 0x39, 0x71, 0xd0, 0x97, 0xe0, 0x56, 0xa9, 0x6b, 0x4c, 0x1d, 0x05, 0x3a, 0x97, - 0x03, 0x9b, 0xcb, 0xc1, 0xc2, 0x32, 0x70, 0x4d, 0xf6, 0xff, 0xde, 0x83, 0xee, 0x4c, 0xa5, 0xfc, - 0xdb, 0x2a, 0xfa, 0x0c, 0x3a, 0x91, 0xcc, 0x69, 0x13, 0xb2, 0xef, 0x35, 0xb7, 0xa9, 0x28, 0xc7, - 0x9a, 0x89, 0xbe, 0x80, 0xde, 0x5a, 0x67, 0xb7, 0x11, 0x7b, 0xdc, 0xdc, 0x64, 0x02, 0x1e, 0x5b, - 0xb6, 0x6c, 0x2c, 0x74, 0xb0, 0xaa, 0x3b, 0xb0, 0xb3, 0xd1, 0xa4, 0x2f, 0xb6, 0x6c, 0xd9, 0x58, - 0xea, 0x20, 0x54, 0xa1, 0xb1, 0xb3, 0xd1, 0xa4, 0x25, 0xb6, 0x6c, 0xf4, 0x0d, 0xb8, 0x5b, 0x9b, - 0x8f, 0x2a, 0x2c, 0x76, 0x1e, 0x4c, 0x15, 0xa3, 0xb8, 0xee, 0x90, 0x89, 0x5a, 0x9d, 0x75, 0xc8, - 0x0a, 0x95, 0x48, 0x2d, 0x3c, 0xa8, 0xb0, 0x59, 0xe1, 0xff, 0xe1, 0xc0, 0xbe, 0x7e, 0x03, 0x8f, - 0x09, 0x8b, 0x93, 0xeb, 0xc6, 0x4f, 0x24, 0x82, 0xf6, 0x96, 0x26, 0x99, 0xf9, 0x42, 0xaa, 0x67, - 0x74, 0x06, 0x6d, 0xa9, 0x51, 0x1d, 0xe1, 0xc1, 0xae, 0x5f, 0xb8, 0x9e, 0xbc, 0xb8, 0xce, 0x28, - 0x56, 0x6c, 0x99, 0xb9, 0xfa, 0xab, 0xee, 0xb5, 0x5f, 0x97, 0xb9, 0xba, 0x0f, 0x1b, 0xee, 0x27, - 0x2b, 0x80, 0x7a, 0x12, 0x1a, 0x40, 0xef, 0xe1, 0xb3, 0xe5, 0xd3, 0xc5, 0x14, 0x8f, 0xde, 0x41, - 0x2e, 0x74, 0x2e, 0xcf, 0x97, 0x97, 0xd3, 0x91, 0x23, 0xf1, 0xf9, 0x72, 0x36, 0x3b, 0xc7, 0x2f, - 0x46, 0x7b, 0xb2, 0x58, 0x3e, 0x5d, 0xbc, 0x78, 0x3e, 0x7d, 0x34, 0x6a, 0xa1, 0x21, 0xb8, 0x4f, - 0xbe, 0x9d, 0x2f, 0x9e, 0x5d, 0xe2, 0xf3, 0xd9, 0xa8, 0x8d, 0xde, 0x85, 0x3b, 0xaa, 0x27, 0xac, - 0xc1, 0xce, 0x05, 0x86, 0xc6, 0x3f, 0x18, 0x3f, 0x3c, 0x88, 0x62, 0xb1, 0x2d, 0x57, 0xc1, 0x9a, - 0xb3, 0x7f, 0xff, 0x45, 0x09, 0x19, 0xdf, 0xd0, 0x64, 0x12, 0xf1, 0xaf, 0x62, 0x1e, 0xd6, 0xab, - 0xa1, 0x5e, 0xfd, 0x27, 0x00, 0x00, 0xff, 0xff, 0x16, 0x77, 0x81, 0x98, 0xd7, 0x08, 0x00, 0x00, -} diff --git a/vendor/github.com/prometheus/common/LICENSE b/vendor/github.com/prometheus/common/LICENSE deleted file mode 100644 index 261eeb9e9..000000000 --- a/vendor/github.com/prometheus/common/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. 
Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/prometheus/common/NOTICE b/vendor/github.com/prometheus/common/NOTICE deleted file mode 100644 index 636a2c1a5..000000000 --- a/vendor/github.com/prometheus/common/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -Common libraries shared by Prometheus Go components. -Copyright 2015 The Prometheus Authors - -This product includes software developed at -SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go deleted file mode 100644 index 7657f841d..000000000 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ /dev/null @@ -1,429 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expfmt - -import ( - "fmt" - "io" - "math" - "mime" - "net/http" - - dto "github.com/prometheus/client_model/go" - - "github.com/matttproud/golang_protobuf_extensions/pbutil" - "github.com/prometheus/common/model" -) - -// Decoder types decode an input stream into metric families. -type Decoder interface { - Decode(*dto.MetricFamily) error -} - -// DecodeOptions contains options used by the Decoder and in sample extraction. -type DecodeOptions struct { - // Timestamp is added to each value from the stream that has no explicit timestamp set. - Timestamp model.Time -} - -// ResponseFormat extracts the correct format from a HTTP response header. -// If no matching format can be found FormatUnknown is returned. -func ResponseFormat(h http.Header) Format { - ct := h.Get(hdrContentType) - - mediatype, params, err := mime.ParseMediaType(ct) - if err != nil { - return FmtUnknown - } - - const textType = "text/plain" - - switch mediatype { - case ProtoType: - if p, ok := params["proto"]; ok && p != ProtoProtocol { - return FmtUnknown - } - if e, ok := params["encoding"]; ok && e != "delimited" { - return FmtUnknown - } - return FmtProtoDelim - - case textType: - if v, ok := params["version"]; ok && v != TextVersion { - return FmtUnknown - } - return FmtText - } - - return FmtUnknown -} - -// NewDecoder returns a new decoder based on the given input format. -// If the input format does not imply otherwise, a text format decoder is returned. -func NewDecoder(r io.Reader, format Format) Decoder { - switch format { - case FmtProtoDelim: - return &protoDecoder{r: r} - } - return &textDecoder{r: r} -} - -// protoDecoder implements the Decoder interface for protocol buffers. -type protoDecoder struct { - r io.Reader -} - -// Decode implements the Decoder interface. 
-func (d *protoDecoder) Decode(v *dto.MetricFamily) error { - _, err := pbutil.ReadDelimited(d.r, v) - if err != nil { - return err - } - if !model.IsValidMetricName(model.LabelValue(v.GetName())) { - return fmt.Errorf("invalid metric name %q", v.GetName()) - } - for _, m := range v.GetMetric() { - if m == nil { - continue - } - for _, l := range m.GetLabel() { - if l == nil { - continue - } - if !model.LabelValue(l.GetValue()).IsValid() { - return fmt.Errorf("invalid label value %q", l.GetValue()) - } - if !model.LabelName(l.GetName()).IsValid() { - return fmt.Errorf("invalid label name %q", l.GetName()) - } - } - } - return nil -} - -// textDecoder implements the Decoder interface for the text protocol. -type textDecoder struct { - r io.Reader - p TextParser - fams []*dto.MetricFamily -} - -// Decode implements the Decoder interface. -func (d *textDecoder) Decode(v *dto.MetricFamily) error { - // TODO(fabxc): Wrap this as a line reader to make streaming safer. - if len(d.fams) == 0 { - // No cached metric families, read everything and parse metrics. - fams, err := d.p.TextToMetricFamilies(d.r) - if err != nil { - return err - } - if len(fams) == 0 { - return io.EOF - } - d.fams = make([]*dto.MetricFamily, 0, len(fams)) - for _, f := range fams { - d.fams = append(d.fams, f) - } - } - - *v = *d.fams[0] - d.fams = d.fams[1:] - - return nil -} - -// SampleDecoder wraps a Decoder to extract samples from the metric families -// decoded by the wrapped Decoder. -type SampleDecoder struct { - Dec Decoder - Opts *DecodeOptions - - f dto.MetricFamily -} - -// Decode calls the Decode method of the wrapped Decoder and then extracts the -// samples from the decoded MetricFamily into the provided model.Vector. -func (sd *SampleDecoder) Decode(s *model.Vector) error { - err := sd.Dec.Decode(&sd.f) - if err != nil { - return err - } - *s, err = extractSamples(&sd.f, sd.Opts) - return err -} - -// ExtractSamples builds a slice of samples from the provided metric -// families. If an error occurs during sample extraction, it continues to -// extract from the remaining metric families. The returned error is the last -// error that has occurred. -func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) { - var ( - all model.Vector - lastErr error - ) - for _, f := range fams { - some, err := extractSamples(f, o) - if err != nil { - lastErr = err - continue - } - all = append(all, some...) 
- } - return all, lastErr -} - -func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) { - switch f.GetType() { - case dto.MetricType_COUNTER: - return extractCounter(o, f), nil - case dto.MetricType_GAUGE: - return extractGauge(o, f), nil - case dto.MetricType_SUMMARY: - return extractSummary(o, f), nil - case dto.MetricType_UNTYPED: - return extractUntyped(o, f), nil - case dto.MetricType_HISTOGRAM: - return extractHistogram(o, f), nil - } - return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType()) -} - -func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector { - samples := make(model.Vector, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Counter == nil { - continue - } - - lset := make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) - - smpl := &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Counter.GetValue()), - } - - if m.TimestampMs != nil { - smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) - } else { - smpl.Timestamp = o.Timestamp - } - - samples = append(samples, smpl) - } - - return samples -} - -func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector { - samples := make(model.Vector, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Gauge == nil { - continue - } - - lset := make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) - - smpl := &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Gauge.GetValue()), - } - - if m.TimestampMs != nil { - smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) - } else { - smpl.Timestamp = o.Timestamp - } - - samples = append(samples, smpl) - } - - return samples -} - -func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector { - samples := make(model.Vector, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Untyped == nil { - continue - } - - lset := make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) - - smpl := &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Untyped.GetValue()), - } - - if m.TimestampMs != nil { - smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) - } else { - smpl.Timestamp = o.Timestamp - } - - samples = append(samples, smpl) - } - - return samples -} - -func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector { - samples := make(model.Vector, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Summary == nil { - continue - } - - timestamp := o.Timestamp - if m.TimestampMs != nil { - timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) - } - - for _, q := range m.Summary.Quantile { - lset := make(model.LabelSet, len(m.Label)+2) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - // BUG(matt): Update other names to "quantile". 
- lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile())) - lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(q.GetValue()), - Timestamp: timestamp, - }) - } - - lset := make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Summary.GetSampleSum()), - Timestamp: timestamp, - }) - - lset = make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Summary.GetSampleCount()), - Timestamp: timestamp, - }) - } - - return samples -} - -func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector { - samples := make(model.Vector, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Histogram == nil { - continue - } - - timestamp := o.Timestamp - if m.TimestampMs != nil { - timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) - } - - infSeen := false - - for _, q := range m.Histogram.Bucket { - lset := make(model.LabelSet, len(m.Label)+2) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound())) - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") - - if math.IsInf(q.GetUpperBound(), +1) { - infSeen = true - } - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(q.GetCumulativeCount()), - Timestamp: timestamp, - }) - } - - lset := make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Histogram.GetSampleSum()), - Timestamp: timestamp, - }) - - lset = make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") - - count := &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Histogram.GetSampleCount()), - Timestamp: timestamp, - } - samples = append(samples, count) - - if !infSeen { - // Append an infinity bucket sample. 
- lset := make(model.LabelSet, len(m.Label)+2) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf") - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: count.Value, - Timestamp: timestamp, - }) - } - } - - return samples -} diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go deleted file mode 100644 index 64dc0eb40..000000000 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expfmt - -import ( - "fmt" - "io" - "net/http" - - "github.com/golang/protobuf/proto" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/matttproud/golang_protobuf_extensions/pbutil" - "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" - - dto "github.com/prometheus/client_model/go" -) - -// Encoder types encode metric families into an underlying wire protocol. -type Encoder interface { - Encode(*dto.MetricFamily) error -} - -// Closer is implemented by Encoders that need to be closed to finalize -// encoding. (For example, OpenMetrics needs a final `# EOF` line.) -// -// Note that all Encoder implementations returned from this package implement -// Closer, too, even if the Close call is a no-op. This happens in preparation -// for adding a Close method to the Encoder interface directly in a (mildly -// breaking) release in the future. -type Closer interface { - Close() error -} - -type encoderCloser struct { - encode func(*dto.MetricFamily) error - close func() error -} - -func (ec encoderCloser) Encode(v *dto.MetricFamily) error { - return ec.encode(v) -} - -func (ec encoderCloser) Close() error { - return ec.close() -} - -// Negotiate returns the Content-Type based on the given Accept header. If no -// appropriate accepted type is found, FmtText is returned (which is the -// Prometheus text format). This function will never negotiate FmtOpenMetrics, -// as the support is still experimental. To include the option to negotiate -// FmtOpenMetrics, use NegotiateOpenMetrics. -func Negotiate(h http.Header) Format { - for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { - ver := ac.Params["version"] - if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { - switch ac.Params["encoding"] { - case "delimited": - return FmtProtoDelim - case "text": - return FmtProtoText - case "compact-text": - return FmtProtoCompact - } - } - if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return FmtText - } - } - return FmtText -} - -// NegotiateIncludingOpenMetrics works like Negotiate but includes -// FmtOpenMetrics as an option for the result. 
Note that this function is -// temporary and will disappear once FmtOpenMetrics is fully supported and as -// such may be negotiated by the normal Negotiate function. -func NegotiateIncludingOpenMetrics(h http.Header) Format { - for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { - ver := ac.Params["version"] - if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { - switch ac.Params["encoding"] { - case "delimited": - return FmtProtoDelim - case "text": - return FmtProtoText - case "compact-text": - return FmtProtoCompact - } - } - if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return FmtText - } - if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion || ver == "") { - return FmtOpenMetrics - } - } - return FmtText -} - -// NewEncoder returns a new encoder based on content type negotiation. All -// Encoder implementations returned by NewEncoder also implement Closer, and -// callers should always call the Close method. It is currently only required -// for FmtOpenMetrics, but a future (breaking) release will add the Close method -// to the Encoder interface directly. The current version of the Encoder -// interface is kept for backwards compatibility. -func NewEncoder(w io.Writer, format Format) Encoder { - switch format { - case FmtProtoDelim: - return encoderCloser{ - encode: func(v *dto.MetricFamily) error { - _, err := pbutil.WriteDelimited(w, v) - return err - }, - close: func() error { return nil }, - } - case FmtProtoCompact: - return encoderCloser{ - encode: func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, v.String()) - return err - }, - close: func() error { return nil }, - } - case FmtProtoText: - return encoderCloser{ - encode: func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, proto.MarshalTextString(v)) - return err - }, - close: func() error { return nil }, - } - case FmtText: - return encoderCloser{ - encode: func(v *dto.MetricFamily) error { - _, err := MetricFamilyToText(w, v) - return err - }, - close: func() error { return nil }, - } - case FmtOpenMetrics: - return encoderCloser{ - encode: func(v *dto.MetricFamily) error { - _, err := MetricFamilyToOpenMetrics(w, v) - return err - }, - close: func() error { - _, err := FinalizeOpenMetrics(w) - return err - }, - } - } - panic(fmt.Errorf("expfmt.NewEncoder: unknown format %q", format)) -} diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go deleted file mode 100644 index 0f176fa64..000000000 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package expfmt contains tools for reading and writing Prometheus metrics. -package expfmt - -// Format specifies the HTTP content type of the different wire protocols. 
-type Format string - -// Constants to assemble the Content-Type values for the different wire protocols. -const ( - TextVersion = "0.0.4" - ProtoType = `application/vnd.google.protobuf` - ProtoProtocol = `io.prometheus.client.MetricFamily` - ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" - OpenMetricsType = `application/openmetrics-text` - OpenMetricsVersion = "0.0.1" - - // The Content-Type values for the different wire protocols. - FmtUnknown Format = `` - FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` - FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` - FmtProtoText Format = ProtoFmt + ` encoding=text` - FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` - FmtOpenMetrics Format = OpenMetricsType + `; version=` + OpenMetricsVersion + `; charset=utf-8` -) - -const ( - hdrContentType = "Content-Type" - hdrAccept = "Accept" -) diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go deleted file mode 100644 index f819e4f8b..000000000 --- a/vendor/github.com/prometheus/common/expfmt/fuzz.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Build only when actually fuzzing -//go:build gofuzz -// +build gofuzz - -package expfmt - -import "bytes" - -// Fuzz text metric parser with with github.com/dvyukov/go-fuzz: -// -// go-fuzz-build github.com/prometheus/common/expfmt -// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz -// -// Further input samples should go in the folder fuzz/corpus. -func Fuzz(in []byte) int { - parser := TextParser{} - _, err := parser.TextToMetricFamilies(bytes.NewReader(in)) - - if err != nil { - return 0 - } - - return 1 -} diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go deleted file mode 100644 index 9d94ae9ef..000000000 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ /dev/null @@ -1,527 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expfmt - -import ( - "bufio" - "bytes" - "fmt" - "io" - "math" - "strconv" - "strings" - - "github.com/prometheus/common/model" - - dto "github.com/prometheus/client_model/go" -) - -// MetricFamilyToOpenMetrics converts a MetricFamily proto message into the -// OpenMetrics text format and writes the resulting lines to 'out'. 
It returns -// the number of bytes written and any error encountered. The output will have -// the same order as the input, no further sorting is performed. Furthermore, -// this function assumes the input is already sanitized and does not perform any -// sanity checks. If the input contains duplicate metrics or invalid metric or -// label names, the conversion will result in invalid text format output. -// -// This function fulfills the type 'expfmt.encoder'. -// -// Note that OpenMetrics requires a final `# EOF` line. Since this function acts -// on individual metric families, it is the responsibility of the caller to -// append this line to 'out' once all metric families have been written. -// Conveniently, this can be done by calling FinalizeOpenMetrics. -// -// The output should be fully OpenMetrics compliant. However, there are a few -// missing features and peculiarities to avoid complications when switching from -// Prometheus to OpenMetrics or vice versa: -// -// - Counters are expected to have the `_total` suffix in their metric name. In -// the output, the suffix will be truncated from the `# TYPE` and `# HELP` -// line. A counter with a missing `_total` suffix is not an error. However, -// its type will be set to `unknown` in that case to avoid invalid OpenMetrics -// output. -// -// - No support for the following (optional) features: `# UNIT` line, `_created` -// line, info type, stateset type, gaugehistogram type. -// -// - The size of exemplar labels is not checked (i.e. it's possible to create -// exemplars that are larger than allowed by the OpenMetrics specification). -// -// - The value of Counters is not checked. (OpenMetrics doesn't allow counters -// with a `NaN` value.) -func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) { - name := in.GetName() - if name == "" { - return 0, fmt.Errorf("MetricFamily has no name: %s", in) - } - - // Try the interface upgrade. If it doesn't work, we'll use a - // bufio.Writer from the sync.Pool. - w, ok := out.(enhancedWriter) - if !ok { - b := bufPool.Get().(*bufio.Writer) - b.Reset(out) - w = b - defer func() { - bErr := b.Flush() - if err == nil { - err = bErr - } - bufPool.Put(b) - }() - } - - var ( - n int - metricType = in.GetType() - shortName = name - ) - if metricType == dto.MetricType_COUNTER && strings.HasSuffix(shortName, "_total") { - shortName = name[:len(name)-6] - } - - // Comments, first HELP, then TYPE. 
- if in.Help != nil { - n, err = w.WriteString("# HELP ") - written += n - if err != nil { - return - } - n, err = w.WriteString(shortName) - written += n - if err != nil { - return - } - err = w.WriteByte(' ') - written++ - if err != nil { - return - } - n, err = writeEscapedString(w, *in.Help, true) - written += n - if err != nil { - return - } - err = w.WriteByte('\n') - written++ - if err != nil { - return - } - } - n, err = w.WriteString("# TYPE ") - written += n - if err != nil { - return - } - n, err = w.WriteString(shortName) - written += n - if err != nil { - return - } - switch metricType { - case dto.MetricType_COUNTER: - if strings.HasSuffix(name, "_total") { - n, err = w.WriteString(" counter\n") - } else { - n, err = w.WriteString(" unknown\n") - } - case dto.MetricType_GAUGE: - n, err = w.WriteString(" gauge\n") - case dto.MetricType_SUMMARY: - n, err = w.WriteString(" summary\n") - case dto.MetricType_UNTYPED: - n, err = w.WriteString(" unknown\n") - case dto.MetricType_HISTOGRAM: - n, err = w.WriteString(" histogram\n") - default: - return written, fmt.Errorf("unknown metric type %s", metricType.String()) - } - written += n - if err != nil { - return - } - - // Finally the samples, one line for each. - for _, metric := range in.Metric { - switch metricType { - case dto.MetricType_COUNTER: - if metric.Counter == nil { - return written, fmt.Errorf( - "expected counter in metric %s %s", name, metric, - ) - } - // Note that we have ensured above that either the name - // ends on `_total` or that the rendered type is - // `unknown`. Therefore, no `_total` must be added here. - n, err = writeOpenMetricsSample( - w, name, "", metric, "", 0, - metric.Counter.GetValue(), 0, false, - metric.Counter.Exemplar, - ) - case dto.MetricType_GAUGE: - if metric.Gauge == nil { - return written, fmt.Errorf( - "expected gauge in metric %s %s", name, metric, - ) - } - n, err = writeOpenMetricsSample( - w, name, "", metric, "", 0, - metric.Gauge.GetValue(), 0, false, - nil, - ) - case dto.MetricType_UNTYPED: - if metric.Untyped == nil { - return written, fmt.Errorf( - "expected untyped in metric %s %s", name, metric, - ) - } - n, err = writeOpenMetricsSample( - w, name, "", metric, "", 0, - metric.Untyped.GetValue(), 0, false, - nil, - ) - case dto.MetricType_SUMMARY: - if metric.Summary == nil { - return written, fmt.Errorf( - "expected summary in metric %s %s", name, metric, - ) - } - for _, q := range metric.Summary.Quantile { - n, err = writeOpenMetricsSample( - w, name, "", metric, - model.QuantileLabel, q.GetQuantile(), - q.GetValue(), 0, false, - nil, - ) - written += n - if err != nil { - return - } - } - n, err = writeOpenMetricsSample( - w, name, "_sum", metric, "", 0, - metric.Summary.GetSampleSum(), 0, false, - nil, - ) - written += n - if err != nil { - return - } - n, err = writeOpenMetricsSample( - w, name, "_count", metric, "", 0, - 0, metric.Summary.GetSampleCount(), true, - nil, - ) - case dto.MetricType_HISTOGRAM: - if metric.Histogram == nil { - return written, fmt.Errorf( - "expected histogram in metric %s %s", name, metric, - ) - } - infSeen := false - for _, b := range metric.Histogram.Bucket { - n, err = writeOpenMetricsSample( - w, name, "_bucket", metric, - model.BucketLabel, b.GetUpperBound(), - 0, b.GetCumulativeCount(), true, - b.Exemplar, - ) - written += n - if err != nil { - return - } - if math.IsInf(b.GetUpperBound(), +1) { - infSeen = true - } - } - if !infSeen { - n, err = writeOpenMetricsSample( - w, name, "_bucket", metric, - model.BucketLabel, math.Inf(+1), 
- 0, metric.Histogram.GetSampleCount(), true, - nil, - ) - written += n - if err != nil { - return - } - } - n, err = writeOpenMetricsSample( - w, name, "_sum", metric, "", 0, - metric.Histogram.GetSampleSum(), 0, false, - nil, - ) - written += n - if err != nil { - return - } - n, err = writeOpenMetricsSample( - w, name, "_count", metric, "", 0, - 0, metric.Histogram.GetSampleCount(), true, - nil, - ) - default: - return written, fmt.Errorf( - "unexpected type in metric %s %s", name, metric, - ) - } - written += n - if err != nil { - return - } - } - return -} - -// FinalizeOpenMetrics writes the final `# EOF\n` line required by OpenMetrics. -func FinalizeOpenMetrics(w io.Writer) (written int, err error) { - return w.Write([]byte("# EOF\n")) -} - -// writeOpenMetricsSample writes a single sample in OpenMetrics text format to -// w, given the metric name, the metric proto message itself, optionally an -// additional label name with a float64 value (use empty string as label name if -// not required), the value (optionally as float64 or uint64, determined by -// useIntValue), and optionally an exemplar (use nil if not required). The -// function returns the number of bytes written and any error encountered. -func writeOpenMetricsSample( - w enhancedWriter, - name, suffix string, - metric *dto.Metric, - additionalLabelName string, additionalLabelValue float64, - floatValue float64, intValue uint64, useIntValue bool, - exemplar *dto.Exemplar, -) (int, error) { - var written int - n, err := w.WriteString(name) - written += n - if err != nil { - return written, err - } - if suffix != "" { - n, err = w.WriteString(suffix) - written += n - if err != nil { - return written, err - } - } - n, err = writeOpenMetricsLabelPairs( - w, metric.Label, additionalLabelName, additionalLabelValue, - ) - written += n - if err != nil { - return written, err - } - err = w.WriteByte(' ') - written++ - if err != nil { - return written, err - } - if useIntValue { - n, err = writeUint(w, intValue) - } else { - n, err = writeOpenMetricsFloat(w, floatValue) - } - written += n - if err != nil { - return written, err - } - if metric.TimestampMs != nil { - err = w.WriteByte(' ') - written++ - if err != nil { - return written, err - } - // TODO(beorn7): Format this directly without converting to a float first. - n, err = writeOpenMetricsFloat(w, float64(*metric.TimestampMs)/1000) - written += n - if err != nil { - return written, err - } - } - if exemplar != nil { - n, err = writeExemplar(w, exemplar) - written += n - if err != nil { - return written, err - } - } - err = w.WriteByte('\n') - written++ - if err != nil { - return written, err - } - return written, nil -} - -// writeOpenMetricsLabelPairs works like writeOpenMetrics but formats the float -// in OpenMetrics style. 
-func writeOpenMetricsLabelPairs( - w enhancedWriter, - in []*dto.LabelPair, - additionalLabelName string, additionalLabelValue float64, -) (int, error) { - if len(in) == 0 && additionalLabelName == "" { - return 0, nil - } - var ( - written int - separator byte = '{' - ) - for _, lp := range in { - err := w.WriteByte(separator) - written++ - if err != nil { - return written, err - } - n, err := w.WriteString(lp.GetName()) - written += n - if err != nil { - return written, err - } - n, err = w.WriteString(`="`) - written += n - if err != nil { - return written, err - } - n, err = writeEscapedString(w, lp.GetValue(), true) - written += n - if err != nil { - return written, err - } - err = w.WriteByte('"') - written++ - if err != nil { - return written, err - } - separator = ',' - } - if additionalLabelName != "" { - err := w.WriteByte(separator) - written++ - if err != nil { - return written, err - } - n, err := w.WriteString(additionalLabelName) - written += n - if err != nil { - return written, err - } - n, err = w.WriteString(`="`) - written += n - if err != nil { - return written, err - } - n, err = writeOpenMetricsFloat(w, additionalLabelValue) - written += n - if err != nil { - return written, err - } - err = w.WriteByte('"') - written++ - if err != nil { - return written, err - } - } - err := w.WriteByte('}') - written++ - if err != nil { - return written, err - } - return written, nil -} - -// writeExemplar writes the provided exemplar in OpenMetrics format to w. The -// function returns the number of bytes written and any error encountered. -func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) { - written := 0 - n, err := w.WriteString(" # ") - written += n - if err != nil { - return written, err - } - n, err = writeOpenMetricsLabelPairs(w, e.Label, "", 0) - written += n - if err != nil { - return written, err - } - err = w.WriteByte(' ') - written++ - if err != nil { - return written, err - } - n, err = writeOpenMetricsFloat(w, e.GetValue()) - written += n - if err != nil { - return written, err - } - if e.Timestamp != nil { - err = w.WriteByte(' ') - written++ - if err != nil { - return written, err - } - err = (*e).Timestamp.CheckValid() - if err != nil { - return written, err - } - ts := (*e).Timestamp.AsTime() - // TODO(beorn7): Format this directly from components of ts to - // avoid overflow/underflow and precision issues of the float - // conversion. - n, err = writeOpenMetricsFloat(w, float64(ts.UnixNano())/1e9) - written += n - if err != nil { - return written, err - } - } - return written, nil -} - -// writeOpenMetricsFloat works like writeFloat but appends ".0" if the resulting -// number would otherwise contain neither a "." nor an "e". -func writeOpenMetricsFloat(w enhancedWriter, f float64) (int, error) { - switch { - case f == 1: - return w.WriteString("1.0") - case f == 0: - return w.WriteString("0.0") - case f == -1: - return w.WriteString("-1.0") - case math.IsNaN(f): - return w.WriteString("NaN") - case math.IsInf(f, +1): - return w.WriteString("+Inf") - case math.IsInf(f, -1): - return w.WriteString("-Inf") - default: - bp := numBufPool.Get().(*[]byte) - *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64) - if !bytes.ContainsAny(*bp, "e.") { - *bp = append(*bp, '.', '0') - } - written, err := w.Write(*bp) - numBufPool.Put(bp) - return written, err - } -} - -// writeUint is like writeInt just for uint64. 
-func writeUint(w enhancedWriter, u uint64) (int, error) { - bp := numBufPool.Get().(*[]byte) - *bp = strconv.AppendUint((*bp)[:0], u, 10) - written, err := w.Write(*bp) - numBufPool.Put(bp) - return written, err -} diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go deleted file mode 100644 index 5ba503b06..000000000 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ /dev/null @@ -1,465 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expfmt - -import ( - "bufio" - "fmt" - "io" - "io/ioutil" - "math" - "strconv" - "strings" - "sync" - - "github.com/prometheus/common/model" - - dto "github.com/prometheus/client_model/go" -) - -// enhancedWriter has all the enhanced write functions needed here. bufio.Writer -// implements it. -type enhancedWriter interface { - io.Writer - WriteRune(r rune) (n int, err error) - WriteString(s string) (n int, err error) - WriteByte(c byte) error -} - -const ( - initialNumBufSize = 24 -) - -var ( - bufPool = sync.Pool{ - New: func() interface{} { - return bufio.NewWriter(ioutil.Discard) - }, - } - numBufPool = sync.Pool{ - New: func() interface{} { - b := make([]byte, 0, initialNumBufSize) - return &b - }, - } -) - -// MetricFamilyToText converts a MetricFamily proto message into text format and -// writes the resulting lines to 'out'. It returns the number of bytes written -// and any error encountered. The output will have the same order as the input, -// no further sorting is performed. Furthermore, this function assumes the input -// is already sanitized and does not perform any sanity checks. If the input -// contains duplicate metrics or invalid metric or label names, the conversion -// will result in invalid text format output. -// -// This method fulfills the type 'prometheus.encoder'. -func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) { - // Fail-fast checks. - if len(in.Metric) == 0 { - return 0, fmt.Errorf("MetricFamily has no metrics: %s", in) - } - name := in.GetName() - if name == "" { - return 0, fmt.Errorf("MetricFamily has no name: %s", in) - } - - // Try the interface upgrade. If it doesn't work, we'll use a - // bufio.Writer from the sync.Pool. - w, ok := out.(enhancedWriter) - if !ok { - b := bufPool.Get().(*bufio.Writer) - b.Reset(out) - w = b - defer func() { - bErr := b.Flush() - if err == nil { - err = bErr - } - bufPool.Put(b) - }() - } - - var n int - - // Comments, first HELP, then TYPE. 
- if in.Help != nil { - n, err = w.WriteString("# HELP ") - written += n - if err != nil { - return - } - n, err = w.WriteString(name) - written += n - if err != nil { - return - } - err = w.WriteByte(' ') - written++ - if err != nil { - return - } - n, err = writeEscapedString(w, *in.Help, false) - written += n - if err != nil { - return - } - err = w.WriteByte('\n') - written++ - if err != nil { - return - } - } - n, err = w.WriteString("# TYPE ") - written += n - if err != nil { - return - } - n, err = w.WriteString(name) - written += n - if err != nil { - return - } - metricType := in.GetType() - switch metricType { - case dto.MetricType_COUNTER: - n, err = w.WriteString(" counter\n") - case dto.MetricType_GAUGE: - n, err = w.WriteString(" gauge\n") - case dto.MetricType_SUMMARY: - n, err = w.WriteString(" summary\n") - case dto.MetricType_UNTYPED: - n, err = w.WriteString(" untyped\n") - case dto.MetricType_HISTOGRAM: - n, err = w.WriteString(" histogram\n") - default: - return written, fmt.Errorf("unknown metric type %s", metricType.String()) - } - written += n - if err != nil { - return - } - - // Finally the samples, one line for each. - for _, metric := range in.Metric { - switch metricType { - case dto.MetricType_COUNTER: - if metric.Counter == nil { - return written, fmt.Errorf( - "expected counter in metric %s %s", name, metric, - ) - } - n, err = writeSample( - w, name, "", metric, "", 0, - metric.Counter.GetValue(), - ) - case dto.MetricType_GAUGE: - if metric.Gauge == nil { - return written, fmt.Errorf( - "expected gauge in metric %s %s", name, metric, - ) - } - n, err = writeSample( - w, name, "", metric, "", 0, - metric.Gauge.GetValue(), - ) - case dto.MetricType_UNTYPED: - if metric.Untyped == nil { - return written, fmt.Errorf( - "expected untyped in metric %s %s", name, metric, - ) - } - n, err = writeSample( - w, name, "", metric, "", 0, - metric.Untyped.GetValue(), - ) - case dto.MetricType_SUMMARY: - if metric.Summary == nil { - return written, fmt.Errorf( - "expected summary in metric %s %s", name, metric, - ) - } - for _, q := range metric.Summary.Quantile { - n, err = writeSample( - w, name, "", metric, - model.QuantileLabel, q.GetQuantile(), - q.GetValue(), - ) - written += n - if err != nil { - return - } - } - n, err = writeSample( - w, name, "_sum", metric, "", 0, - metric.Summary.GetSampleSum(), - ) - written += n - if err != nil { - return - } - n, err = writeSample( - w, name, "_count", metric, "", 0, - float64(metric.Summary.GetSampleCount()), - ) - case dto.MetricType_HISTOGRAM: - if metric.Histogram == nil { - return written, fmt.Errorf( - "expected histogram in metric %s %s", name, metric, - ) - } - infSeen := false - for _, b := range metric.Histogram.Bucket { - n, err = writeSample( - w, name, "_bucket", metric, - model.BucketLabel, b.GetUpperBound(), - float64(b.GetCumulativeCount()), - ) - written += n - if err != nil { - return - } - if math.IsInf(b.GetUpperBound(), +1) { - infSeen = true - } - } - if !infSeen { - n, err = writeSample( - w, name, "_bucket", metric, - model.BucketLabel, math.Inf(+1), - float64(metric.Histogram.GetSampleCount()), - ) - written += n - if err != nil { - return - } - } - n, err = writeSample( - w, name, "_sum", metric, "", 0, - metric.Histogram.GetSampleSum(), - ) - written += n - if err != nil { - return - } - n, err = writeSample( - w, name, "_count", metric, "", 0, - float64(metric.Histogram.GetSampleCount()), - ) - default: - return written, fmt.Errorf( - "unexpected type in metric %s %s", name, metric, - ) - } - 
written += n - if err != nil { - return - } - } - return -} - -// writeSample writes a single sample in text format to w, given the metric -// name, the metric proto message itself, optionally an additional label name -// with a float64 value (use empty string as label name if not required), and -// the value. The function returns the number of bytes written and any error -// encountered. -func writeSample( - w enhancedWriter, - name, suffix string, - metric *dto.Metric, - additionalLabelName string, additionalLabelValue float64, - value float64, -) (int, error) { - var written int - n, err := w.WriteString(name) - written += n - if err != nil { - return written, err - } - if suffix != "" { - n, err = w.WriteString(suffix) - written += n - if err != nil { - return written, err - } - } - n, err = writeLabelPairs( - w, metric.Label, additionalLabelName, additionalLabelValue, - ) - written += n - if err != nil { - return written, err - } - err = w.WriteByte(' ') - written++ - if err != nil { - return written, err - } - n, err = writeFloat(w, value) - written += n - if err != nil { - return written, err - } - if metric.TimestampMs != nil { - err = w.WriteByte(' ') - written++ - if err != nil { - return written, err - } - n, err = writeInt(w, *metric.TimestampMs) - written += n - if err != nil { - return written, err - } - } - err = w.WriteByte('\n') - written++ - if err != nil { - return written, err - } - return written, nil -} - -// writeLabelPairs converts a slice of LabelPair proto messages plus the -// explicitly given additional label pair into text formatted as required by the -// text format and writes it to 'w'. An empty slice in combination with an empty -// string 'additionalLabelName' results in nothing being written. Otherwise, the -// label pairs are written, escaped as required by the text format, and enclosed -// in '{...}'. The function returns the number of bytes written and any error -// encountered. -func writeLabelPairs( - w enhancedWriter, - in []*dto.LabelPair, - additionalLabelName string, additionalLabelValue float64, -) (int, error) { - if len(in) == 0 && additionalLabelName == "" { - return 0, nil - } - var ( - written int - separator byte = '{' - ) - for _, lp := range in { - err := w.WriteByte(separator) - written++ - if err != nil { - return written, err - } - n, err := w.WriteString(lp.GetName()) - written += n - if err != nil { - return written, err - } - n, err = w.WriteString(`="`) - written += n - if err != nil { - return written, err - } - n, err = writeEscapedString(w, lp.GetValue(), true) - written += n - if err != nil { - return written, err - } - err = w.WriteByte('"') - written++ - if err != nil { - return written, err - } - separator = ',' - } - if additionalLabelName != "" { - err := w.WriteByte(separator) - written++ - if err != nil { - return written, err - } - n, err := w.WriteString(additionalLabelName) - written += n - if err != nil { - return written, err - } - n, err = w.WriteString(`="`) - written += n - if err != nil { - return written, err - } - n, err = writeFloat(w, additionalLabelValue) - written += n - if err != nil { - return written, err - } - err = w.WriteByte('"') - written++ - if err != nil { - return written, err - } - } - err := w.WriteByte('}') - written++ - if err != nil { - return written, err - } - return written, nil -} - -// writeEscapedString replaces '\' by '\\', new line character by '\n', and - if -// includeDoubleQuote is true - '"' by '\"'. 
-var ( - escaper = strings.NewReplacer("\\", `\\`, "\n", `\n`) - quotedEscaper = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`) -) - -func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) { - if includeDoubleQuote { - return quotedEscaper.WriteString(w, v) - } - return escaper.WriteString(w, v) -} - -// writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes -// a few common cases for increased efficiency. For non-hardcoded cases, it uses -// strconv.AppendFloat to avoid allocations, similar to writeInt. -func writeFloat(w enhancedWriter, f float64) (int, error) { - switch { - case f == 1: - return 1, w.WriteByte('1') - case f == 0: - return 1, w.WriteByte('0') - case f == -1: - return w.WriteString("-1") - case math.IsNaN(f): - return w.WriteString("NaN") - case math.IsInf(f, +1): - return w.WriteString("+Inf") - case math.IsInf(f, -1): - return w.WriteString("-Inf") - default: - bp := numBufPool.Get().(*[]byte) - *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64) - written, err := w.Write(*bp) - numBufPool.Put(bp) - return written, err - } -} - -// writeInt is equivalent to fmt.Fprint with an int64 argument but uses -// strconv.AppendInt with a byte slice taken from a sync.Pool to avoid -// allocations. -func writeInt(w enhancedWriter, i int64) (int, error) { - bp := numBufPool.Get().(*[]byte) - *bp = strconv.AppendInt((*bp)[:0], i, 10) - written, err := w.Write(*bp) - numBufPool.Put(bp) - return written, err -} diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go deleted file mode 100644 index 84be0643e..000000000 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ /dev/null @@ -1,775 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expfmt - -import ( - "bufio" - "bytes" - "fmt" - "io" - "math" - "strconv" - "strings" - - dto "github.com/prometheus/client_model/go" - - "github.com/golang/protobuf/proto" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. - "github.com/prometheus/common/model" -) - -// A stateFn is a function that represents a state in a state machine. By -// executing it, the state is progressed to the next state. The stateFn returns -// another stateFn, which represents the new state. The end state is represented -// by nil. -type stateFn func() stateFn - -// ParseError signals errors while parsing the simple and flat text-based -// exchange format. -type ParseError struct { - Line int - Msg string -} - -// Error implements the error interface. -func (e ParseError) Error() string { - return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg) -} - -// TextParser is used to parse the simple and flat text-based exchange format. Its -// zero value is ready to use. 
-type TextParser struct { - metricFamiliesByName map[string]*dto.MetricFamily - buf *bufio.Reader // Where the parsed input is read through. - err error // Most recent error. - lineCount int // Tracks the line count for error messages. - currentByte byte // The most recent byte read. - currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes. - currentMF *dto.MetricFamily - currentMetric *dto.Metric - currentLabelPair *dto.LabelPair - - // The remaining member variables are only used for summaries/histograms. - currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le' - // Summary specific. - summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature. - currentQuantile float64 - // Histogram specific. - histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature. - currentBucket float64 - // These tell us if the currently processed line ends on '_count' or - // '_sum' respectively and belong to a summary/histogram, representing the sample - // count and sum of that summary/histogram. - currentIsSummaryCount, currentIsSummarySum bool - currentIsHistogramCount, currentIsHistogramSum bool -} - -// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange -// format and creates MetricFamily proto messages. It returns the MetricFamily -// proto messages in a map where the metric names are the keys, along with any -// error encountered. -// -// If the input contains duplicate metrics (i.e. lines with the same metric name -// and exactly the same label set), the resulting MetricFamily will contain -// duplicate Metric proto messages. Similar is true for duplicate label -// names. Checks for duplicates have to be performed separately, if required. -// Also note that neither the metrics within each MetricFamily are sorted nor -// the label pairs within each Metric. Sorting is not required for the most -// frequent use of this method, which is sample ingestion in the Prometheus -// server. However, for presentation purposes, you might want to sort the -// metrics, and in some cases, you must sort the labels, e.g. for consumption by -// the metric family injection hook of the Prometheus registry. -// -// Summaries and histograms are rather special beasts. You would probably not -// use them in the simple text format anyway. This method can deal with -// summaries and histograms if they are presented in exactly the way the -// text.Create function creates them. -// -// This method must not be called concurrently. If you want to parse different -// input concurrently, instantiate a separate Parser for each goroutine. -func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) { - p.reset(in) - for nextState := p.startOfLine; nextState != nil; nextState = nextState() { - // Magic happens here... - } - // Get rid of empty metric families. - for k, mf := range p.metricFamiliesByName { - if len(mf.GetMetric()) == 0 { - delete(p.metricFamiliesByName, k) - } - } - // If p.err is io.EOF now, we have run into a premature end of the input - // stream. Turn this error into something nicer and more - // meaningful. (io.EOF is often used as a signal for the legitimate end - // of an input stream.) 
- if p.err == io.EOF { - p.parseError("unexpected end of input stream") - } - return p.metricFamiliesByName, p.err -} - -func (p *TextParser) reset(in io.Reader) { - p.metricFamiliesByName = map[string]*dto.MetricFamily{} - if p.buf == nil { - p.buf = bufio.NewReader(in) - } else { - p.buf.Reset(in) - } - p.err = nil - p.lineCount = 0 - if p.summaries == nil || len(p.summaries) > 0 { - p.summaries = map[uint64]*dto.Metric{} - } - if p.histograms == nil || len(p.histograms) > 0 { - p.histograms = map[uint64]*dto.Metric{} - } - p.currentQuantile = math.NaN() - p.currentBucket = math.NaN() -} - -// startOfLine represents the state where the next byte read from p.buf is the -// start of a line (or whitespace leading up to it). -func (p *TextParser) startOfLine() stateFn { - p.lineCount++ - if p.skipBlankTab(); p.err != nil { - // End of input reached. This is the only case where - // that is not an error but a signal that we are done. - p.err = nil - return nil - } - switch p.currentByte { - case '#': - return p.startComment - case '\n': - return p.startOfLine // Empty line, start the next one. - } - return p.readingMetricName -} - -// startComment represents the state where the next byte read from p.buf is the -// start of a comment (or whitespace leading up to it). -func (p *TextParser) startComment() stateFn { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte == '\n' { - return p.startOfLine - } - if p.readTokenUntilWhitespace(); p.err != nil { - return nil // Unexpected end of input. - } - // If we have hit the end of line already, there is nothing left - // to do. This is not considered a syntax error. - if p.currentByte == '\n' { - return p.startOfLine - } - keyword := p.currentToken.String() - if keyword != "HELP" && keyword != "TYPE" { - // Generic comment, ignore by fast forwarding to end of line. - for p.currentByte != '\n' { - if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { - return nil // Unexpected end of input. - } - } - return p.startOfLine - } - // There is something. Next has to be a metric name. - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.readTokenAsMetricName(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte == '\n' { - // At the end of the line already. - // Again, this is not considered a syntax error. - return p.startOfLine - } - if !isBlankOrTab(p.currentByte) { - p.parseError("invalid metric name in comment") - return nil - } - p.setOrCreateCurrentMF() - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte == '\n' { - // At the end of the line already. - // Again, this is not considered a syntax error. - return p.startOfLine - } - switch keyword { - case "HELP": - return p.readingHelp - case "TYPE": - return p.readingType - } - panic(fmt.Sprintf("code error: unexpected keyword %q", keyword)) -} - -// readingMetricName represents the state where the last byte read (now in -// p.currentByte) is the first byte of a metric name. -func (p *TextParser) readingMetricName() stateFn { - if p.readTokenAsMetricName(); p.err != nil { - return nil - } - if p.currentToken.Len() == 0 { - p.parseError("invalid metric name") - return nil - } - p.setOrCreateCurrentMF() - // Now is the time to fix the type if it hasn't happened yet. 
- if p.currentMF.Type == nil { - p.currentMF.Type = dto.MetricType_UNTYPED.Enum() - } - p.currentMetric = &dto.Metric{} - // Do not append the newly created currentMetric to - // currentMF.Metric right now. First wait if this is a summary, - // and the metric exists already, which we can only know after - // having read all the labels. - if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - return p.readingLabels -} - -// readingLabels represents the state where the last byte read (now in -// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the -// first byte of the value (otherwise). -func (p *TextParser) readingLabels() stateFn { - // Summaries/histograms are special. We have to reset the - // currentLabels map, currentQuantile and currentBucket before starting to - // read labels. - if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM { - p.currentLabels = map[string]string{} - p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName() - p.currentQuantile = math.NaN() - p.currentBucket = math.NaN() - } - if p.currentByte != '{' { - return p.readingValue - } - return p.startLabelName -} - -// startLabelName represents the state where the next byte read from p.buf is -// the start of a label name (or whitespace leading up to it). -func (p *TextParser) startLabelName() stateFn { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte == '}' { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - return p.readingValue - } - if p.readTokenAsLabelName(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentToken.Len() == 0 { - p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName())) - return nil - } - p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} - if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { - p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) - return nil - } - // Special summary/histogram treatment. Don't add 'quantile' and 'le' - // labels to 'real' labels. - if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && - !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { - p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair) - } - if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte != '=' { - p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) - return nil - } - // Check for duplicate label names. - labels := make(map[string]struct{}) - for _, l := range p.currentMetric.Label { - lName := l.GetName() - if _, exists := labels[lName]; !exists { - labels[lName] = struct{}{} - } else { - p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName())) - return nil - } - } - return p.startLabelValue -} - -// startLabelValue represents the state where the next byte read from p.buf is -// the start of a (quoted) label value (or whitespace leading up to it). -func (p *TextParser) startLabelValue() stateFn { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. 
- } - if p.currentByte != '"' { - p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte)) - return nil - } - if p.readTokenAsLabelValue(); p.err != nil { - return nil - } - if !model.LabelValue(p.currentToken.String()).IsValid() { - p.parseError(fmt.Sprintf("invalid label value %q", p.currentToken.String())) - return nil - } - p.currentLabelPair.Value = proto.String(p.currentToken.String()) - // Special treatment of summaries: - // - Quantile labels are special, will result in dto.Quantile later. - // - Other labels have to be added to currentLabels for signature calculation. - if p.currentMF.GetType() == dto.MetricType_SUMMARY { - if p.currentLabelPair.GetName() == model.QuantileLabel { - if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { - // Create a more helpful error message. - p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) - return nil - } - } else { - p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() - } - } - // Similar special treatment of histograms. - if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { - if p.currentLabelPair.GetName() == model.BucketLabel { - if p.currentBucket, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { - // Create a more helpful error message. - p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue())) - return nil - } - } else { - p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() - } - } - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - switch p.currentByte { - case ',': - return p.startLabelName - - case '}': - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - return p.readingValue - default: - p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue())) - return nil - } -} - -// readingValue represents the state where the last byte read (now in -// p.currentByte) is the first byte of the sample value (i.e. a float). -func (p *TextParser) readingValue() stateFn { - // When we are here, we have read all the labels, so for the - // special case of a summary/histogram, we can finally find out - // if the metric already exists. - if p.currentMF.GetType() == dto.MetricType_SUMMARY { - signature := model.LabelsToSignature(p.currentLabels) - if summary := p.summaries[signature]; summary != nil { - p.currentMetric = summary - } else { - p.summaries[signature] = p.currentMetric - p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) - } - } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { - signature := model.LabelsToSignature(p.currentLabels) - if histogram := p.histograms[signature]; histogram != nil { - p.currentMetric = histogram - } else { - p.histograms[signature] = p.currentMetric - p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) - } - } else { - p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) - } - if p.readTokenUntilWhitespace(); p.err != nil { - return nil // Unexpected end of input. - } - value, err := parseFloat(p.currentToken.String()) - if err != nil { - // Create a more helpful error message. 
- p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String())) - return nil - } - switch p.currentMF.GetType() { - case dto.MetricType_COUNTER: - p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)} - case dto.MetricType_GAUGE: - p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)} - case dto.MetricType_UNTYPED: - p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)} - case dto.MetricType_SUMMARY: - // *sigh* - if p.currentMetric.Summary == nil { - p.currentMetric.Summary = &dto.Summary{} - } - switch { - case p.currentIsSummaryCount: - p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value)) - case p.currentIsSummarySum: - p.currentMetric.Summary.SampleSum = proto.Float64(value) - case !math.IsNaN(p.currentQuantile): - p.currentMetric.Summary.Quantile = append( - p.currentMetric.Summary.Quantile, - &dto.Quantile{ - Quantile: proto.Float64(p.currentQuantile), - Value: proto.Float64(value), - }, - ) - } - case dto.MetricType_HISTOGRAM: - // *sigh* - if p.currentMetric.Histogram == nil { - p.currentMetric.Histogram = &dto.Histogram{} - } - switch { - case p.currentIsHistogramCount: - p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value)) - case p.currentIsHistogramSum: - p.currentMetric.Histogram.SampleSum = proto.Float64(value) - case !math.IsNaN(p.currentBucket): - p.currentMetric.Histogram.Bucket = append( - p.currentMetric.Histogram.Bucket, - &dto.Bucket{ - UpperBound: proto.Float64(p.currentBucket), - CumulativeCount: proto.Uint64(uint64(value)), - }, - ) - } - default: - p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName()) - } - if p.currentByte == '\n' { - return p.startOfLine - } - return p.startTimestamp -} - -// startTimestamp represents the state where the next byte read from p.buf is -// the start of the timestamp (or whitespace leading up to it). -func (p *TextParser) startTimestamp() stateFn { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.readTokenUntilWhitespace(); p.err != nil { - return nil // Unexpected end of input. - } - timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64) - if err != nil { - // Create a more helpful error message. - p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String())) - return nil - } - p.currentMetric.TimestampMs = proto.Int64(timestamp) - if p.readTokenUntilNewline(false); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentToken.Len() > 0 { - p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String())) - return nil - } - return p.startOfLine -} - -// readingHelp represents the state where the last byte read (now in -// p.currentByte) is the first byte of the docstring after 'HELP'. -func (p *TextParser) readingHelp() stateFn { - if p.currentMF.Help != nil { - p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName())) - return nil - } - // Rest of line is the docstring. - if p.readTokenUntilNewline(true); p.err != nil { - return nil // Unexpected end of input. - } - p.currentMF.Help = proto.String(p.currentToken.String()) - return p.startOfLine -} - -// readingType represents the state where the last byte read (now in -// p.currentByte) is the first byte of the type hint after 'HELP'. 
-func (p *TextParser) readingType() stateFn { - if p.currentMF.Type != nil { - p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName())) - return nil - } - // Rest of line is the type. - if p.readTokenUntilNewline(false); p.err != nil { - return nil // Unexpected end of input. - } - metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())] - if !ok { - p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String())) - return nil - } - p.currentMF.Type = dto.MetricType(metricType).Enum() - return p.startOfLine -} - -// parseError sets p.err to a ParseError at the current line with the given -// message. -func (p *TextParser) parseError(msg string) { - p.err = ParseError{ - Line: p.lineCount, - Msg: msg, - } -} - -// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte -// that is neither ' ' nor '\t'. That byte is left in p.currentByte. -func (p *TextParser) skipBlankTab() { - for { - if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) { - return - } - } -} - -// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do -// anything if p.currentByte is neither ' ' nor '\t'. -func (p *TextParser) skipBlankTabIfCurrentBlankTab() { - if isBlankOrTab(p.currentByte) { - p.skipBlankTab() - } -} - -// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The -// first byte considered is the byte already read (now in p.currentByte). The -// first whitespace byte encountered is still copied into p.currentByte, but not -// into p.currentToken. -func (p *TextParser) readTokenUntilWhitespace() { - p.currentToken.Reset() - for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' { - p.currentToken.WriteByte(p.currentByte) - p.currentByte, p.err = p.buf.ReadByte() - } -} - -// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first -// byte considered is the byte already read (now in p.currentByte). The first -// newline byte encountered is still copied into p.currentByte, but not into -// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are -// recognized: '\\' translates into '\', and '\n' into a line-feed character. -// All other escape sequences are invalid and cause an error. -func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { - p.currentToken.Reset() - escaped := false - for p.err == nil { - if recognizeEscapeSequence && escaped { - switch p.currentByte { - case '\\': - p.currentToken.WriteByte(p.currentByte) - case 'n': - p.currentToken.WriteByte('\n') - default: - p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) - return - } - escaped = false - } else { - switch p.currentByte { - case '\n': - return - case '\\': - escaped = true - default: - p.currentToken.WriteByte(p.currentByte) - } - } - p.currentByte, p.err = p.buf.ReadByte() - } -} - -// readTokenAsMetricName copies a metric name from p.buf into p.currentToken. -// The first byte considered is the byte already read (now in p.currentByte). -// The first byte not part of a metric name is still copied into p.currentByte, -// but not into p.currentToken. 
-func (p *TextParser) readTokenAsMetricName() { - p.currentToken.Reset() - if !isValidMetricNameStart(p.currentByte) { - return - } - for { - p.currentToken.WriteByte(p.currentByte) - p.currentByte, p.err = p.buf.ReadByte() - if p.err != nil || !isValidMetricNameContinuation(p.currentByte) { - return - } - } -} - -// readTokenAsLabelName copies a label name from p.buf into p.currentToken. -// The first byte considered is the byte already read (now in p.currentByte). -// The first byte not part of a label name is still copied into p.currentByte, -// but not into p.currentToken. -func (p *TextParser) readTokenAsLabelName() { - p.currentToken.Reset() - if !isValidLabelNameStart(p.currentByte) { - return - } - for { - p.currentToken.WriteByte(p.currentByte) - p.currentByte, p.err = p.buf.ReadByte() - if p.err != nil || !isValidLabelNameContinuation(p.currentByte) { - return - } - } -} - -// readTokenAsLabelValue copies a label value from p.buf into p.currentToken. -// In contrast to the other 'readTokenAs...' functions, which start with the -// last read byte in p.currentByte, this method ignores p.currentByte and starts -// with reading a new byte from p.buf. The first byte not part of a label value -// is still copied into p.currentByte, but not into p.currentToken. -func (p *TextParser) readTokenAsLabelValue() { - p.currentToken.Reset() - escaped := false - for { - if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { - return - } - if escaped { - switch p.currentByte { - case '"', '\\': - p.currentToken.WriteByte(p.currentByte) - case 'n': - p.currentToken.WriteByte('\n') - default: - p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) - return - } - escaped = false - continue - } - switch p.currentByte { - case '"': - return - case '\n': - p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String())) - return - case '\\': - escaped = true - default: - p.currentToken.WriteByte(p.currentByte) - } - } -} - -func (p *TextParser) setOrCreateCurrentMF() { - p.currentIsSummaryCount = false - p.currentIsSummarySum = false - p.currentIsHistogramCount = false - p.currentIsHistogramSum = false - name := p.currentToken.String() - if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil { - return - } - // Try out if this is a _sum or _count for a summary/histogram. 
- summaryName := summaryMetricName(name) - if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil { - if p.currentMF.GetType() == dto.MetricType_SUMMARY { - if isCount(name) { - p.currentIsSummaryCount = true - } - if isSum(name) { - p.currentIsSummarySum = true - } - return - } - } - histogramName := histogramMetricName(name) - if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil { - if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { - if isCount(name) { - p.currentIsHistogramCount = true - } - if isSum(name) { - p.currentIsHistogramSum = true - } - return - } - } - p.currentMF = &dto.MetricFamily{Name: proto.String(name)} - p.metricFamiliesByName[name] = p.currentMF -} - -func isValidLabelNameStart(b byte) bool { - return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' -} - -func isValidLabelNameContinuation(b byte) bool { - return isValidLabelNameStart(b) || (b >= '0' && b <= '9') -} - -func isValidMetricNameStart(b byte) bool { - return isValidLabelNameStart(b) || b == ':' -} - -func isValidMetricNameContinuation(b byte) bool { - return isValidLabelNameContinuation(b) || b == ':' -} - -func isBlankOrTab(b byte) bool { - return b == ' ' || b == '\t' -} - -func isCount(name string) bool { - return len(name) > 6 && name[len(name)-6:] == "_count" -} - -func isSum(name string) bool { - return len(name) > 4 && name[len(name)-4:] == "_sum" -} - -func isBucket(name string) bool { - return len(name) > 7 && name[len(name)-7:] == "_bucket" -} - -func summaryMetricName(name string) string { - switch { - case isCount(name): - return name[:len(name)-6] - case isSum(name): - return name[:len(name)-4] - default: - return name - } -} - -func histogramMetricName(name string) string { - switch { - case isCount(name): - return name[:len(name)-6] - case isSum(name): - return name[:len(name)-4] - case isBucket(name): - return name[:len(name)-7] - default: - return name - } -} - -func parseFloat(s string) (float64, error) { - if strings.ContainsAny(s, "pP_") { - return 0, fmt.Errorf("unsupported character in float") - } - return strconv.ParseFloat(s, 64) -} diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt deleted file mode 100644 index 7723656d5..000000000 --- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt +++ /dev/null @@ -1,67 +0,0 @@ -PACKAGE - -package goautoneg -import "bitbucket.org/ww/goautoneg" - -HTTP Content-Type Autonegotiation. - -The functions in this package implement the behaviour specified in -http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -FUNCTIONS - -func Negotiate(header string, alternatives []string) (content_type string) -Negotiate the most appropriate content_type given the accept header -and a list of alternatives. - -func ParseAccept(header string) (accept []Accept) -Parse an Accept Header string returning a sorted list -of clauses - - -TYPES - -type Accept struct { - Type, SubType string - Q float32 - Params map[string]string -} -Structure to represent a clause in an HTTP Accept Header - - -SUBDIRECTORIES - - .hg diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go deleted file mode 100644 index 26e92288c..000000000 --- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go +++ /dev/null @@ -1,162 +0,0 @@ -/* -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. - -HTTP Content-Type Autonegotiation. - -The functions in this package implement the behaviour specified in -http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - -*/ -package goautoneg - -import ( - "sort" - "strconv" - "strings" -) - -// Structure to represent a clause in an HTTP Accept Header -type Accept struct { - Type, SubType string - Q float64 - Params map[string]string -} - -// For internal use, so that we can use the sort interface -type accept_slice []Accept - -func (accept accept_slice) Len() int { - slice := []Accept(accept) - return len(slice) -} - -func (accept accept_slice) Less(i, j int) bool { - slice := []Accept(accept) - ai, aj := slice[i], slice[j] - if ai.Q > aj.Q { - return true - } - if ai.Type != "*" && aj.Type == "*" { - return true - } - if ai.SubType != "*" && aj.SubType == "*" { - return true - } - return false -} - -func (accept accept_slice) Swap(i, j int) { - slice := []Accept(accept) - slice[i], slice[j] = slice[j], slice[i] -} - -// Parse an Accept Header string returning a sorted list -// of clauses -func ParseAccept(header string) (accept []Accept) { - parts := strings.Split(header, ",") - accept = make([]Accept, 0, len(parts)) - for _, part := range parts { - part := strings.Trim(part, " ") - - a := Accept{} - a.Params = make(map[string]string) - a.Q = 1.0 - - mrp := strings.Split(part, ";") - - media_range := mrp[0] - sp := strings.Split(media_range, "/") - a.Type = strings.Trim(sp[0], " ") - - switch { - case len(sp) == 1 && a.Type == "*": - a.SubType = "*" - case len(sp) == 2: - a.SubType = strings.Trim(sp[1], " ") - default: - continue - } - - if len(mrp) == 1 { - accept = append(accept, a) - continue - } - - for _, param := range mrp[1:] { - sp := strings.SplitN(param, "=", 2) - if len(sp) != 2 { - continue - } - token := strings.Trim(sp[0], " ") - if token == "q" { - a.Q, _ = strconv.ParseFloat(sp[1], 32) - } else { - a.Params[token] = strings.Trim(sp[1], " ") - } - } - - accept = append(accept, a) - } - - slice := accept_slice(accept) - sort.Sort(slice) - - return -} - -// Negotiate the most appropriate content_type given the accept header -// and a list of alternatives. -func Negotiate(header string, alternatives []string) (content_type string) { - asp := make([][]string, 0, len(alternatives)) - for _, ctype := range alternatives { - asp = append(asp, strings.SplitN(ctype, "/", 2)) - } - for _, clause := range ParseAccept(header) { - for i, ctsp := range asp { - if clause.Type == ctsp[0] && clause.SubType == ctsp[1] { - content_type = alternatives[i] - return - } - if clause.Type == ctsp[0] && clause.SubType == "*" { - content_type = alternatives[i] - return - } - if clause.Type == "*" && clause.SubType == "*" { - content_type = alternatives[i] - return - } - } - } - return -} diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go deleted file mode 100644 index 35e739c7a..000000000 --- a/vendor/github.com/prometheus/common/model/alert.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package model - -import ( - "fmt" - "time" -) - -type AlertStatus string - -const ( - AlertFiring AlertStatus = "firing" - AlertResolved AlertStatus = "resolved" -) - -// Alert is a generic representation of an alert in the Prometheus eco-system. -type Alert struct { - // Label value pairs for purpose of aggregation, matching, and disposition - // dispatching. This must minimally include an "alertname" label. - Labels LabelSet `json:"labels"` - - // Extra key/value information which does not define alert identity. - Annotations LabelSet `json:"annotations"` - - // The known time range for this alert. Both ends are optional. - StartsAt time.Time `json:"startsAt,omitempty"` - EndsAt time.Time `json:"endsAt,omitempty"` - GeneratorURL string `json:"generatorURL"` -} - -// Name returns the name of the alert. It is equivalent to the "alertname" label. -func (a *Alert) Name() string { - return string(a.Labels[AlertNameLabel]) -} - -// Fingerprint returns a unique hash for the alert. It is equivalent to -// the fingerprint of the alert's label set. -func (a *Alert) Fingerprint() Fingerprint { - return a.Labels.Fingerprint() -} - -func (a *Alert) String() string { - s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7]) - if a.Resolved() { - return s + "[resolved]" - } - return s + "[active]" -} - -// Resolved returns true iff the activity interval ended in the past. -func (a *Alert) Resolved() bool { - return a.ResolvedAt(time.Now()) -} - -// ResolvedAt returns true off the activity interval ended before -// the given timestamp. -func (a *Alert) ResolvedAt(ts time.Time) bool { - if a.EndsAt.IsZero() { - return false - } - return !a.EndsAt.After(ts) -} - -// Status returns the status of the alert. -func (a *Alert) Status() AlertStatus { - if a.Resolved() { - return AlertResolved - } - return AlertFiring -} - -// Validate checks whether the alert data is inconsistent. -func (a *Alert) Validate() error { - if a.StartsAt.IsZero() { - return fmt.Errorf("start time missing") - } - if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) { - return fmt.Errorf("start time must be before end time") - } - if err := a.Labels.Validate(); err != nil { - return fmt.Errorf("invalid label set: %s", err) - } - if len(a.Labels) == 0 { - return fmt.Errorf("at least one label pair required") - } - if err := a.Annotations.Validate(); err != nil { - return fmt.Errorf("invalid annotations: %s", err) - } - return nil -} - -// Alert is a list of alerts that can be sorted in chronological order. -type Alerts []*Alert - -func (as Alerts) Len() int { return len(as) } -func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] } - -func (as Alerts) Less(i, j int) bool { - if as[i].StartsAt.Before(as[j].StartsAt) { - return true - } - if as[i].EndsAt.Before(as[j].EndsAt) { - return true - } - return as[i].Fingerprint() < as[j].Fingerprint() -} - -// HasFiring returns true iff one of the alerts is not resolved. -func (as Alerts) HasFiring() bool { - for _, a := range as { - if !a.Resolved() { - return true - } - } - return false -} - -// Status returns StatusFiring iff at least one of the alerts is firing. 
-func (as Alerts) Status() AlertStatus { - if as.HasFiring() { - return AlertFiring - } - return AlertResolved -} diff --git a/vendor/github.com/prometheus/common/model/fingerprinting.go b/vendor/github.com/prometheus/common/model/fingerprinting.go deleted file mode 100644 index fc4de4106..000000000 --- a/vendor/github.com/prometheus/common/model/fingerprinting.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "fmt" - "strconv" -) - -// Fingerprint provides a hash-capable representation of a Metric. -// For our purposes, FNV-1A 64-bit is used. -type Fingerprint uint64 - -// FingerprintFromString transforms a string representation into a Fingerprint. -func FingerprintFromString(s string) (Fingerprint, error) { - num, err := strconv.ParseUint(s, 16, 64) - return Fingerprint(num), err -} - -// ParseFingerprint parses the input string into a fingerprint. -func ParseFingerprint(s string) (Fingerprint, error) { - num, err := strconv.ParseUint(s, 16, 64) - if err != nil { - return 0, err - } - return Fingerprint(num), nil -} - -func (f Fingerprint) String() string { - return fmt.Sprintf("%016x", uint64(f)) -} - -// Fingerprints represents a collection of Fingerprint subject to a given -// natural sorting scheme. It implements sort.Interface. -type Fingerprints []Fingerprint - -// Len implements sort.Interface. -func (f Fingerprints) Len() int { - return len(f) -} - -// Less implements sort.Interface. -func (f Fingerprints) Less(i, j int) bool { - return f[i] < f[j] -} - -// Swap implements sort.Interface. -func (f Fingerprints) Swap(i, j int) { - f[i], f[j] = f[j], f[i] -} - -// FingerprintSet is a set of Fingerprints. -type FingerprintSet map[Fingerprint]struct{} - -// Equal returns true if both sets contain the same elements (and not more). -func (s FingerprintSet) Equal(o FingerprintSet) bool { - if len(s) != len(o) { - return false - } - - for k := range s { - if _, ok := o[k]; !ok { - return false - } - } - - return true -} - -// Intersection returns the elements contained in both sets. -func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet { - myLength, otherLength := len(s), len(o) - if myLength == 0 || otherLength == 0 { - return FingerprintSet{} - } - - subSet := s - superSet := o - - if otherLength < myLength { - subSet = o - superSet = s - } - - out := FingerprintSet{} - - for k := range subSet { - if _, ok := superSet[k]; ok { - out[k] = struct{}{} - } - } - - return out -} diff --git a/vendor/github.com/prometheus/common/model/fnv.go b/vendor/github.com/prometheus/common/model/fnv.go deleted file mode 100644 index 367afecd3..000000000 --- a/vendor/github.com/prometheus/common/model/fnv.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -// Inline and byte-free variant of hash/fnv's fnv64a. - -const ( - offset64 = 14695981039346656037 - prime64 = 1099511628211 -) - -// hashNew initializes a new fnv64a hash value. -func hashNew() uint64 { - return offset64 -} - -// hashAdd adds a string to a fnv64a hash value, returning the updated hash. -func hashAdd(h uint64, s string) uint64 { - for i := 0; i < len(s); i++ { - h ^= uint64(s[i]) - h *= prime64 - } - return h -} - -// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash. -func hashAddByte(h uint64, b byte) uint64 { - h ^= uint64(b) - h *= prime64 - return h -} diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go deleted file mode 100644 index ef8956335..000000000 --- a/vendor/github.com/prometheus/common/model/labels.go +++ /dev/null @@ -1,218 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "fmt" - "regexp" - "strings" - "unicode/utf8" -) - -const ( - // AlertNameLabel is the name of the label containing the an alert's name. - AlertNameLabel = "alertname" - - // ExportedLabelPrefix is the prefix to prepend to the label names present in - // exported metrics if a label of the same name is added by the server. - ExportedLabelPrefix = "exported_" - - // MetricNameLabel is the label name indicating the metric name of a - // timeseries. - MetricNameLabel = "__name__" - - // SchemeLabel is the name of the label that holds the scheme on which to - // scrape a target. - SchemeLabel = "__scheme__" - - // AddressLabel is the name of the label that holds the address of - // a scrape target. - AddressLabel = "__address__" - - // MetricsPathLabel is the name of the label that holds the path on which to - // scrape a target. - MetricsPathLabel = "__metrics_path__" - - // ScrapeIntervalLabel is the name of the label that holds the scrape interval - // used to scrape a target. - ScrapeIntervalLabel = "__scrape_interval__" - - // ScrapeTimeoutLabel is the name of the label that holds the scrape - // timeout used to scrape a target. - ScrapeTimeoutLabel = "__scrape_timeout__" - - // ReservedLabelPrefix is a prefix which is not legal in user-supplied - // label names. - ReservedLabelPrefix = "__" - - // MetaLabelPrefix is a prefix for labels that provide meta information. - // Labels with this prefix are used for intermediate label processing and - // will not be attached to time series. 
- MetaLabelPrefix = "__meta_" - - // TmpLabelPrefix is a prefix for temporary labels as part of relabelling. - // Labels with this prefix are used for intermediate label processing and - // will not be attached to time series. This is reserved for use in - // Prometheus configuration files by users. - TmpLabelPrefix = "__tmp_" - - // ParamLabelPrefix is a prefix for labels that provide URL parameters - // used to scrape a target. - ParamLabelPrefix = "__param_" - - // JobLabel is the label name indicating the job from which a timeseries - // was scraped. - JobLabel = "job" - - // InstanceLabel is the label name used for the instance label. - InstanceLabel = "instance" - - // BucketLabel is used for the label that defines the upper bound of a - // bucket of a histogram ("le" -> "less or equal"). - BucketLabel = "le" - - // QuantileLabel is used for the label that defines the quantile in a - // summary. - QuantileLabel = "quantile" -) - -// LabelNameRE is a regular expression matching valid label names. Note that the -// IsValid method of LabelName performs the same check but faster than a match -// with this regular expression. -var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") - -// A LabelName is a key for a LabelSet or Metric. It has a value associated -// therewith. -type LabelName string - -// IsValid is true iff the label name matches the pattern of LabelNameRE. This -// method, however, does not use LabelNameRE for the check but a much faster -// hardcoded implementation. -func (ln LabelName) IsValid() bool { - if len(ln) == 0 { - return false - } - for i, b := range ln { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { - return false - } - } - return true -} - -// UnmarshalYAML implements the yaml.Unmarshaler interface. -func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error { - var s string - if err := unmarshal(&s); err != nil { - return err - } - if !LabelName(s).IsValid() { - return fmt.Errorf("%q is not a valid label name", s) - } - *ln = LabelName(s) - return nil -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (ln *LabelName) UnmarshalJSON(b []byte) error { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - if !LabelName(s).IsValid() { - return fmt.Errorf("%q is not a valid label name", s) - } - *ln = LabelName(s) - return nil -} - -// LabelNames is a sortable LabelName slice. In implements sort.Interface. -type LabelNames []LabelName - -func (l LabelNames) Len() int { - return len(l) -} - -func (l LabelNames) Less(i, j int) bool { - return l[i] < l[j] -} - -func (l LabelNames) Swap(i, j int) { - l[i], l[j] = l[j], l[i] -} - -func (l LabelNames) String() string { - labelStrings := make([]string, 0, len(l)) - for _, label := range l { - labelStrings = append(labelStrings, string(label)) - } - return strings.Join(labelStrings, ", ") -} - -// A LabelValue is an associated value for a LabelName. -type LabelValue string - -// IsValid returns true iff the string is a valid UTF8. -func (lv LabelValue) IsValid() bool { - return utf8.ValidString(string(lv)) -} - -// LabelValues is a sortable LabelValue slice. It implements sort.Interface. -type LabelValues []LabelValue - -func (l LabelValues) Len() int { - return len(l) -} - -func (l LabelValues) Less(i, j int) bool { - return string(l[i]) < string(l[j]) -} - -func (l LabelValues) Swap(i, j int) { - l[i], l[j] = l[j], l[i] -} - -// LabelPair pairs a name with a value. 
-type LabelPair struct { - Name LabelName - Value LabelValue -} - -// LabelPairs is a sortable slice of LabelPair pointers. It implements -// sort.Interface. -type LabelPairs []*LabelPair - -func (l LabelPairs) Len() int { - return len(l) -} - -func (l LabelPairs) Less(i, j int) bool { - switch { - case l[i].Name > l[j].Name: - return false - case l[i].Name < l[j].Name: - return true - case l[i].Value > l[j].Value: - return false - case l[i].Value < l[j].Value: - return true - default: - return false - } -} - -func (l LabelPairs) Swap(i, j int) { - l[i], l[j] = l[j], l[i] -} diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go deleted file mode 100644 index 6eda08a73..000000000 --- a/vendor/github.com/prometheus/common/model/labelset.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "fmt" - "sort" - "strings" -) - -// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet -// may be fully-qualified down to the point where it may resolve to a single -// Metric in the data store or not. All operations that occur within the realm -// of a LabelSet can emit a vector of Metric entities to which the LabelSet may -// match. -type LabelSet map[LabelName]LabelValue - -// Validate checks whether all names and values in the label set -// are valid. -func (ls LabelSet) Validate() error { - for ln, lv := range ls { - if !ln.IsValid() { - return fmt.Errorf("invalid name %q", ln) - } - if !lv.IsValid() { - return fmt.Errorf("invalid value %q", lv) - } - } - return nil -} - -// Equal returns true iff both label sets have exactly the same key/value pairs. -func (ls LabelSet) Equal(o LabelSet) bool { - if len(ls) != len(o) { - return false - } - for ln, lv := range ls { - olv, ok := o[ln] - if !ok { - return false - } - if olv != lv { - return false - } - } - return true -} - -// Before compares the metrics, using the following criteria: -// -// If m has fewer labels than o, it is before o. If it has more, it is not. -// -// If the number of labels is the same, the superset of all label names is -// sorted alphanumerically. The first differing label pair found in that order -// determines the outcome: If the label does not exist at all in m, then m is -// before o, and vice versa. Otherwise the label value is compared -// alphanumerically. -// -// If m and o are equal, the method returns false. -func (ls LabelSet) Before(o LabelSet) bool { - if len(ls) < len(o) { - return true - } - if len(ls) > len(o) { - return false - } - - lns := make(LabelNames, 0, len(ls)+len(o)) - for ln := range ls { - lns = append(lns, ln) - } - for ln := range o { - lns = append(lns, ln) - } - // It's probably not worth it to de-dup lns. 
- sort.Sort(lns) - for _, ln := range lns { - mlv, ok := ls[ln] - if !ok { - return true - } - olv, ok := o[ln] - if !ok { - return false - } - if mlv < olv { - return true - } - if mlv > olv { - return false - } - } - return false -} - -// Clone returns a copy of the label set. -func (ls LabelSet) Clone() LabelSet { - lsn := make(LabelSet, len(ls)) - for ln, lv := range ls { - lsn[ln] = lv - } - return lsn -} - -// Merge is a helper function to non-destructively merge two label sets. -func (l LabelSet) Merge(other LabelSet) LabelSet { - result := make(LabelSet, len(l)) - - for k, v := range l { - result[k] = v - } - - for k, v := range other { - result[k] = v - } - - return result -} - -func (l LabelSet) String() string { - lstrs := make([]string, 0, len(l)) - for l, v := range l { - lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v)) - } - - sort.Strings(lstrs) - return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) -} - -// Fingerprint returns the LabelSet's fingerprint. -func (ls LabelSet) Fingerprint() Fingerprint { - return labelSetToFingerprint(ls) -} - -// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing -// algorithm, which is, however, more susceptible to hash collisions. -func (ls LabelSet) FastFingerprint() Fingerprint { - return labelSetToFastFingerprint(ls) -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (l *LabelSet) UnmarshalJSON(b []byte) error { - var m map[LabelName]LabelValue - if err := json.Unmarshal(b, &m); err != nil { - return err - } - // encoding/json only unmarshals maps of the form map[string]T. It treats - // LabelName as a string and does not call its UnmarshalJSON method. - // Thus, we have to replicate the behavior here. - for ln := range m { - if !ln.IsValid() { - return fmt.Errorf("%q is not a valid label name", ln) - } - } - *l = LabelSet(m) - return nil -} diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go deleted file mode 100644 index 00804b7fe..000000000 --- a/vendor/github.com/prometheus/common/model/metric.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "fmt" - "regexp" - "sort" - "strings" -) - -var ( - // MetricNameRE is a regular expression matching valid metric - // names. Note that the IsValidMetricName function performs the same - // check but faster than a match with this regular expression. - MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`) -) - -// A Metric is similar to a LabelSet, but the key difference is that a Metric is -// a singleton and refers to one and only one stream of samples. -type Metric LabelSet - -// Equal compares the metrics. -func (m Metric) Equal(o Metric) bool { - return LabelSet(m).Equal(LabelSet(o)) -} - -// Before compares the metrics' underlying label sets. 
-func (m Metric) Before(o Metric) bool { - return LabelSet(m).Before(LabelSet(o)) -} - -// Clone returns a copy of the Metric. -func (m Metric) Clone() Metric { - clone := make(Metric, len(m)) - for k, v := range m { - clone[k] = v - } - return clone -} - -func (m Metric) String() string { - metricName, hasName := m[MetricNameLabel] - numLabels := len(m) - 1 - if !hasName { - numLabels = len(m) - } - labelStrings := make([]string, 0, numLabels) - for label, value := range m { - if label != MetricNameLabel { - labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value)) - } - } - - switch numLabels { - case 0: - if hasName { - return string(metricName) - } - return "{}" - default: - sort.Strings(labelStrings) - return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", ")) - } -} - -// Fingerprint returns a Metric's Fingerprint. -func (m Metric) Fingerprint() Fingerprint { - return LabelSet(m).Fingerprint() -} - -// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing -// algorithm, which is, however, more susceptible to hash collisions. -func (m Metric) FastFingerprint() Fingerprint { - return LabelSet(m).FastFingerprint() -} - -// IsValidMetricName returns true iff name matches the pattern of MetricNameRE. -// This function, however, does not use MetricNameRE for the check but a much -// faster hardcoded implementation. -func IsValidMetricName(n LabelValue) bool { - if len(n) == 0 { - return false - } - for i, b := range n { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) { - return false - } - } - return true -} diff --git a/vendor/github.com/prometheus/common/model/model.go b/vendor/github.com/prometheus/common/model/model.go deleted file mode 100644 index a7b969170..000000000 --- a/vendor/github.com/prometheus/common/model/model.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package model contains common data structures that are shared across -// Prometheus components and libraries. -package model diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go deleted file mode 100644 index 8762b13c6..000000000 --- a/vendor/github.com/prometheus/common/model/signature.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package model - -import ( - "sort" -) - -// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is -// used to separate label names, label values, and other strings from each other -// when calculating their combined hash value (aka signature aka fingerprint). -const SeparatorByte byte = 255 - -var ( - // cache the signature of an empty label set. - emptyLabelSignature = hashNew() -) - -// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a -// given label set. (Collisions are possible but unlikely if the number of label -// sets the function is applied to is small.) -func LabelsToSignature(labels map[string]string) uint64 { - if len(labels) == 0 { - return emptyLabelSignature - } - - labelNames := make([]string, 0, len(labels)) - for labelName := range labels { - labelNames = append(labelNames, labelName) - } - sort.Strings(labelNames) - - sum := hashNew() - for _, labelName := range labelNames { - sum = hashAdd(sum, labelName) - sum = hashAddByte(sum, SeparatorByte) - sum = hashAdd(sum, labels[labelName]) - sum = hashAddByte(sum, SeparatorByte) - } - return sum -} - -// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as -// parameter (rather than a label map) and returns a Fingerprint. -func labelSetToFingerprint(ls LabelSet) Fingerprint { - if len(ls) == 0 { - return Fingerprint(emptyLabelSignature) - } - - labelNames := make(LabelNames, 0, len(ls)) - for labelName := range ls { - labelNames = append(labelNames, labelName) - } - sort.Sort(labelNames) - - sum := hashNew() - for _, labelName := range labelNames { - sum = hashAdd(sum, string(labelName)) - sum = hashAddByte(sum, SeparatorByte) - sum = hashAdd(sum, string(ls[labelName])) - sum = hashAddByte(sum, SeparatorByte) - } - return Fingerprint(sum) -} - -// labelSetToFastFingerprint works similar to labelSetToFingerprint but uses a -// faster and less allocation-heavy hash function, which is more susceptible to -// create hash collisions. Therefore, collision detection should be applied. -func labelSetToFastFingerprint(ls LabelSet) Fingerprint { - if len(ls) == 0 { - return Fingerprint(emptyLabelSignature) - } - - var result uint64 - for labelName, labelValue := range ls { - sum := hashNew() - sum = hashAdd(sum, string(labelName)) - sum = hashAddByte(sum, SeparatorByte) - sum = hashAdd(sum, string(labelValue)) - result ^= sum - } - return Fingerprint(result) -} - -// SignatureForLabels works like LabelsToSignature but takes a Metric as -// parameter (rather than a label map) and only includes the labels with the -// specified LabelNames into the signature calculation. The labels passed in -// will be sorted by this function. -func SignatureForLabels(m Metric, labels ...LabelName) uint64 { - if len(labels) == 0 { - return emptyLabelSignature - } - - sort.Sort(LabelNames(labels)) - - sum := hashNew() - for _, label := range labels { - sum = hashAdd(sum, string(label)) - sum = hashAddByte(sum, SeparatorByte) - sum = hashAdd(sum, string(m[label])) - sum = hashAddByte(sum, SeparatorByte) - } - return sum -} - -// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as -// parameter (rather than a label map) and excludes the labels with any of the -// specified LabelNames from the signature calculation. 
-func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 { - if len(m) == 0 { - return emptyLabelSignature - } - - labelNames := make(LabelNames, 0, len(m)) - for labelName := range m { - if _, exclude := labels[labelName]; !exclude { - labelNames = append(labelNames, labelName) - } - } - if len(labelNames) == 0 { - return emptyLabelSignature - } - sort.Sort(labelNames) - - sum := hashNew() - for _, labelName := range labelNames { - sum = hashAdd(sum, string(labelName)) - sum = hashAddByte(sum, SeparatorByte) - sum = hashAdd(sum, string(m[labelName])) - sum = hashAddByte(sum, SeparatorByte) - } - return sum -} diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go deleted file mode 100644 index bb99889d2..000000000 --- a/vendor/github.com/prometheus/common/model/silence.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "fmt" - "regexp" - "time" -) - -// Matcher describes a matches the value of a given label. -type Matcher struct { - Name LabelName `json:"name"` - Value string `json:"value"` - IsRegex bool `json:"isRegex"` -} - -func (m *Matcher) UnmarshalJSON(b []byte) error { - type plain Matcher - if err := json.Unmarshal(b, (*plain)(m)); err != nil { - return err - } - - if len(m.Name) == 0 { - return fmt.Errorf("label name in matcher must not be empty") - } - if m.IsRegex { - if _, err := regexp.Compile(m.Value); err != nil { - return err - } - } - return nil -} - -// Validate returns true iff all fields of the matcher have valid values. -func (m *Matcher) Validate() error { - if !m.Name.IsValid() { - return fmt.Errorf("invalid name %q", m.Name) - } - if m.IsRegex { - if _, err := regexp.Compile(m.Value); err != nil { - return fmt.Errorf("invalid regular expression %q", m.Value) - } - } else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 { - return fmt.Errorf("invalid value %q", m.Value) - } - return nil -} - -// Silence defines the representation of a silence definition in the Prometheus -// eco-system. -type Silence struct { - ID uint64 `json:"id,omitempty"` - - Matchers []*Matcher `json:"matchers"` - - StartsAt time.Time `json:"startsAt"` - EndsAt time.Time `json:"endsAt"` - - CreatedAt time.Time `json:"createdAt,omitempty"` - CreatedBy string `json:"createdBy"` - Comment string `json:"comment,omitempty"` -} - -// Validate returns true iff all fields of the silence have valid values. 
-func (s *Silence) Validate() error { - if len(s.Matchers) == 0 { - return fmt.Errorf("at least one matcher required") - } - for _, m := range s.Matchers { - if err := m.Validate(); err != nil { - return fmt.Errorf("invalid matcher: %s", err) - } - } - if s.StartsAt.IsZero() { - return fmt.Errorf("start time missing") - } - if s.EndsAt.IsZero() { - return fmt.Errorf("end time missing") - } - if s.EndsAt.Before(s.StartsAt) { - return fmt.Errorf("start time must be before end time") - } - if s.CreatedBy == "" { - return fmt.Errorf("creator information missing") - } - if s.Comment == "" { - return fmt.Errorf("comment missing") - } - if s.CreatedAt.IsZero() { - return fmt.Errorf("creation timestamp missing") - } - return nil -} diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go deleted file mode 100644 index c909b8aa8..000000000 --- a/vendor/github.com/prometheus/common/model/time.go +++ /dev/null @@ -1,317 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "errors" - "fmt" - "math" - "regexp" - "strconv" - "strings" - "time" -) - -const ( - // MinimumTick is the minimum supported time resolution. This has to be - // at least time.Second in order for the code below to work. - minimumTick = time.Millisecond - // second is the Time duration equivalent to one second. - second = int64(time.Second / minimumTick) - // The number of nanoseconds per minimum tick. - nanosPerTick = int64(minimumTick / time.Nanosecond) - - // Earliest is the earliest Time representable. Handy for - // initializing a high watermark. - Earliest = Time(math.MinInt64) - // Latest is the latest Time representable. Handy for initializing - // a low watermark. - Latest = Time(math.MaxInt64) -) - -// Time is the number of milliseconds since the epoch -// (1970-01-01 00:00 UTC) excluding leap seconds. -type Time int64 - -// Interval describes an interval between two timestamps. -type Interval struct { - Start, End Time -} - -// Now returns the current time as a Time. -func Now() Time { - return TimeFromUnixNano(time.Now().UnixNano()) -} - -// TimeFromUnix returns the Time equivalent to the Unix Time t -// provided in seconds. -func TimeFromUnix(t int64) Time { - return Time(t * second) -} - -// TimeFromUnixNano returns the Time equivalent to the Unix Time -// t provided in nanoseconds. -func TimeFromUnixNano(t int64) Time { - return Time(t / nanosPerTick) -} - -// Equal reports whether two Times represent the same instant. -func (t Time) Equal(o Time) bool { - return t == o -} - -// Before reports whether the Time t is before o. -func (t Time) Before(o Time) bool { - return t < o -} - -// After reports whether the Time t is after o. -func (t Time) After(o Time) bool { - return t > o -} - -// Add returns the Time t + d. -func (t Time) Add(d time.Duration) Time { - return t + Time(d/minimumTick) -} - -// Sub returns the Duration t - o. 
-func (t Time) Sub(o Time) time.Duration { - return time.Duration(t-o) * minimumTick -} - -// Time returns the time.Time representation of t. -func (t Time) Time() time.Time { - return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick) -} - -// Unix returns t as a Unix time, the number of seconds elapsed -// since January 1, 1970 UTC. -func (t Time) Unix() int64 { - return int64(t) / second -} - -// UnixNano returns t as a Unix time, the number of nanoseconds elapsed -// since January 1, 1970 UTC. -func (t Time) UnixNano() int64 { - return int64(t) * nanosPerTick -} - -// The number of digits after the dot. -var dotPrecision = int(math.Log10(float64(second))) - -// String returns a string representation of the Time. -func (t Time) String() string { - return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64) -} - -// MarshalJSON implements the json.Marshaler interface. -func (t Time) MarshalJSON() ([]byte, error) { - return []byte(t.String()), nil -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (t *Time) UnmarshalJSON(b []byte) error { - p := strings.Split(string(b), ".") - switch len(p) { - case 1: - v, err := strconv.ParseInt(string(p[0]), 10, 64) - if err != nil { - return err - } - *t = Time(v * second) - - case 2: - v, err := strconv.ParseInt(string(p[0]), 10, 64) - if err != nil { - return err - } - v *= second - - prec := dotPrecision - len(p[1]) - if prec < 0 { - p[1] = p[1][:dotPrecision] - } else if prec > 0 { - p[1] = p[1] + strings.Repeat("0", prec) - } - - va, err := strconv.ParseInt(p[1], 10, 32) - if err != nil { - return err - } - - // If the value was something like -0.1 the negative is lost in the - // parsing because of the leading zero, this ensures that we capture it. - if len(p[0]) > 0 && p[0][0] == '-' && v+va > 0 { - *t = Time(v+va) * -1 - } else { - *t = Time(v + va) - } - - default: - return fmt.Errorf("invalid time %q", string(b)) - } - return nil -} - -// Duration wraps time.Duration. It is used to parse the custom duration format -// from YAML. -// This type should not propagate beyond the scope of input/output processing. -type Duration time.Duration - -// Set implements pflag/flag.Value -func (d *Duration) Set(s string) error { - var err error - *d, err = ParseDuration(s) - return err -} - -// Type implements pflag.Value -func (d *Duration) Type() string { - return "duration" -} - -var durationRE = regexp.MustCompile("^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$") - -// ParseDuration parses a string into a time.Duration, assuming that a year -// always has 365d, a week always has 7d, and a day always has 24h. -func ParseDuration(durationStr string) (Duration, error) { - switch durationStr { - case "0": - // Allow 0 without a unit. - return 0, nil - case "": - return 0, errors.New("empty duration string") - } - matches := durationRE.FindStringSubmatch(durationStr) - if matches == nil { - return 0, fmt.Errorf("not a valid duration string: %q", durationStr) - } - var dur time.Duration - - // Parse the match at pos `pos` in the regex and use `mult` to turn that - // into ms, then add that value to the total parsed duration. - var overflowErr error - m := func(pos int, mult time.Duration) { - if matches[pos] == "" { - return - } - n, _ := strconv.Atoi(matches[pos]) - - // Check if the provided duration overflows time.Duration (> ~ 290years). 
- if n > int((1<<63-1)/mult/time.Millisecond) { - overflowErr = errors.New("duration out of range") - } - d := time.Duration(n) * time.Millisecond - dur += d * mult - - if dur < 0 { - overflowErr = errors.New("duration out of range") - } - } - - m(2, 1000*60*60*24*365) // y - m(4, 1000*60*60*24*7) // w - m(6, 1000*60*60*24) // d - m(8, 1000*60*60) // h - m(10, 1000*60) // m - m(12, 1000) // s - m(14, 1) // ms - - return Duration(dur), overflowErr -} - -func (d Duration) String() string { - var ( - ms = int64(time.Duration(d) / time.Millisecond) - r = "" - ) - if ms == 0 { - return "0s" - } - - f := func(unit string, mult int64, exact bool) { - if exact && ms%mult != 0 { - return - } - if v := ms / mult; v > 0 { - r += fmt.Sprintf("%d%s", v, unit) - ms -= v * mult - } - } - - // Only format years and weeks if the remainder is zero, as it is often - // easier to read 90d than 12w6d. - f("y", 1000*60*60*24*365, true) - f("w", 1000*60*60*24*7, true) - - f("d", 1000*60*60*24, false) - f("h", 1000*60*60, false) - f("m", 1000*60, false) - f("s", 1000, false) - f("ms", 1, false) - - return r -} - -// MarshalJSON implements the json.Marshaler interface. -func (d Duration) MarshalJSON() ([]byte, error) { - return json.Marshal(d.String()) -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (d *Duration) UnmarshalJSON(bytes []byte) error { - var s string - if err := json.Unmarshal(bytes, &s); err != nil { - return err - } - dur, err := ParseDuration(s) - if err != nil { - return err - } - *d = dur - return nil -} - -// MarshalText implements the encoding.TextMarshaler interface. -func (d *Duration) MarshalText() ([]byte, error) { - return []byte(d.String()), nil -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. -func (d *Duration) UnmarshalText(text []byte) error { - var err error - *d, err = ParseDuration(string(text)) - return err -} - -// MarshalYAML implements the yaml.Marshaler interface. -func (d Duration) MarshalYAML() (interface{}, error) { - return d.String(), nil -} - -// UnmarshalYAML implements the yaml.Unmarshaler interface. -func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error { - var s string - if err := unmarshal(&s); err != nil { - return err - } - dur, err := ParseDuration(s) - if err != nil { - return err - } - *d = dur - return nil -} diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go deleted file mode 100644 index c9d8fb1a2..000000000 --- a/vendor/github.com/prometheus/common/model/value.go +++ /dev/null @@ -1,416 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "fmt" - "math" - "sort" - "strconv" - "strings" -) - -var ( - // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a - // non-existing sample pair. It is a SamplePair with timestamp Earliest and - // value 0.0. 
Note that the natural zero value of SamplePair has a timestamp - // of 0, which is possible to appear in a real SamplePair and thus not - // suitable to signal a non-existing SamplePair. - ZeroSamplePair = SamplePair{Timestamp: Earliest} - - // ZeroSample is the pseudo zero-value of Sample used to signal a - // non-existing sample. It is a Sample with timestamp Earliest, value 0.0, - // and metric nil. Note that the natural zero value of Sample has a timestamp - // of 0, which is possible to appear in a real Sample and thus not suitable - // to signal a non-existing Sample. - ZeroSample = Sample{Timestamp: Earliest} -) - -// A SampleValue is a representation of a value for a given sample at a given -// time. -type SampleValue float64 - -// MarshalJSON implements json.Marshaler. -func (v SampleValue) MarshalJSON() ([]byte, error) { - return json.Marshal(v.String()) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (v *SampleValue) UnmarshalJSON(b []byte) error { - if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { - return fmt.Errorf("sample value must be a quoted string") - } - f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) - if err != nil { - return err - } - *v = SampleValue(f) - return nil -} - -// Equal returns true if the value of v and o is equal or if both are NaN. Note -// that v==o is false if both are NaN. If you want the conventional float -// behavior, use == to compare two SampleValues. -func (v SampleValue) Equal(o SampleValue) bool { - if v == o { - return true - } - return math.IsNaN(float64(v)) && math.IsNaN(float64(o)) -} - -func (v SampleValue) String() string { - return strconv.FormatFloat(float64(v), 'f', -1, 64) -} - -// SamplePair pairs a SampleValue with a Timestamp. -type SamplePair struct { - Timestamp Time - Value SampleValue -} - -// MarshalJSON implements json.Marshaler. -func (s SamplePair) MarshalJSON() ([]byte, error) { - t, err := json.Marshal(s.Timestamp) - if err != nil { - return nil, err - } - v, err := json.Marshal(s.Value) - if err != nil { - return nil, err - } - return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (s *SamplePair) UnmarshalJSON(b []byte) error { - v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} - return json.Unmarshal(b, &v) -} - -// Equal returns true if this SamplePair and o have equal Values and equal -// Timestamps. The semantics of Value equality is defined by SampleValue.Equal. -func (s *SamplePair) Equal(o *SamplePair) bool { - return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) -} - -func (s SamplePair) String() string { - return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) -} - -// Sample is a sample pair associated with a metric. -type Sample struct { - Metric Metric `json:"metric"` - Value SampleValue `json:"value"` - Timestamp Time `json:"timestamp"` -} - -// Equal compares first the metrics, then the timestamp, then the value. The -// semantics of value equality is defined by SampleValue.Equal. -func (s *Sample) Equal(o *Sample) bool { - if s == o { - return true - } - - if !s.Metric.Equal(o.Metric) { - return false - } - if !s.Timestamp.Equal(o.Timestamp) { - return false - } - - return s.Value.Equal(o.Value) -} - -func (s Sample) String() string { - return fmt.Sprintf("%s => %s", s.Metric, SamplePair{ - Timestamp: s.Timestamp, - Value: s.Value, - }) -} - -// MarshalJSON implements json.Marshaler. 
-func (s Sample) MarshalJSON() ([]byte, error) { - v := struct { - Metric Metric `json:"metric"` - Value SamplePair `json:"value"` - }{ - Metric: s.Metric, - Value: SamplePair{ - Timestamp: s.Timestamp, - Value: s.Value, - }, - } - - return json.Marshal(&v) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (s *Sample) UnmarshalJSON(b []byte) error { - v := struct { - Metric Metric `json:"metric"` - Value SamplePair `json:"value"` - }{ - Metric: s.Metric, - Value: SamplePair{ - Timestamp: s.Timestamp, - Value: s.Value, - }, - } - - if err := json.Unmarshal(b, &v); err != nil { - return err - } - - s.Metric = v.Metric - s.Timestamp = v.Value.Timestamp - s.Value = v.Value.Value - - return nil -} - -// Samples is a sortable Sample slice. It implements sort.Interface. -type Samples []*Sample - -func (s Samples) Len() int { - return len(s) -} - -// Less compares first the metrics, then the timestamp. -func (s Samples) Less(i, j int) bool { - switch { - case s[i].Metric.Before(s[j].Metric): - return true - case s[j].Metric.Before(s[i].Metric): - return false - case s[i].Timestamp.Before(s[j].Timestamp): - return true - default: - return false - } -} - -func (s Samples) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -// Equal compares two sets of samples and returns true if they are equal. -func (s Samples) Equal(o Samples) bool { - if len(s) != len(o) { - return false - } - - for i, sample := range s { - if !sample.Equal(o[i]) { - return false - } - } - return true -} - -// SampleStream is a stream of Values belonging to an attached COWMetric. -type SampleStream struct { - Metric Metric `json:"metric"` - Values []SamplePair `json:"values"` -} - -func (ss SampleStream) String() string { - vals := make([]string, len(ss.Values)) - for i, v := range ss.Values { - vals[i] = v.String() - } - return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n")) -} - -// Value is a generic interface for values resulting from a query evaluation. -type Value interface { - Type() ValueType - String() string -} - -func (Matrix) Type() ValueType { return ValMatrix } -func (Vector) Type() ValueType { return ValVector } -func (*Scalar) Type() ValueType { return ValScalar } -func (*String) Type() ValueType { return ValString } - -type ValueType int - -const ( - ValNone ValueType = iota - ValScalar - ValVector - ValMatrix - ValString -) - -// MarshalJSON implements json.Marshaler. -func (et ValueType) MarshalJSON() ([]byte, error) { - return json.Marshal(et.String()) -} - -func (et *ValueType) UnmarshalJSON(b []byte) error { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - switch s { - case "": - *et = ValNone - case "scalar": - *et = ValScalar - case "vector": - *et = ValVector - case "matrix": - *et = ValMatrix - case "string": - *et = ValString - default: - return fmt.Errorf("unknown value type %q", s) - } - return nil -} - -func (e ValueType) String() string { - switch e { - case ValNone: - return "" - case ValScalar: - return "scalar" - case ValVector: - return "vector" - case ValMatrix: - return "matrix" - case ValString: - return "string" - } - panic("ValueType.String: unhandled value type") -} - -// Scalar is a scalar value evaluated at the set timestamp. -type Scalar struct { - Value SampleValue `json:"value"` - Timestamp Time `json:"timestamp"` -} - -func (s Scalar) String() string { - return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp) -} - -// MarshalJSON implements json.Marshaler. 
-func (s Scalar) MarshalJSON() ([]byte, error) { - v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64) - return json.Marshal([...]interface{}{s.Timestamp, string(v)}) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (s *Scalar) UnmarshalJSON(b []byte) error { - var f string - v := [...]interface{}{&s.Timestamp, &f} - - if err := json.Unmarshal(b, &v); err != nil { - return err - } - - value, err := strconv.ParseFloat(f, 64) - if err != nil { - return fmt.Errorf("error parsing sample value: %s", err) - } - s.Value = SampleValue(value) - return nil -} - -// String is a string value evaluated at the set timestamp. -type String struct { - Value string `json:"value"` - Timestamp Time `json:"timestamp"` -} - -func (s *String) String() string { - return s.Value -} - -// MarshalJSON implements json.Marshaler. -func (s String) MarshalJSON() ([]byte, error) { - return json.Marshal([]interface{}{s.Timestamp, s.Value}) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (s *String) UnmarshalJSON(b []byte) error { - v := [...]interface{}{&s.Timestamp, &s.Value} - return json.Unmarshal(b, &v) -} - -// Vector is basically only an alias for Samples, but the -// contract is that in a Vector, all Samples have the same timestamp. -type Vector []*Sample - -func (vec Vector) String() string { - entries := make([]string, len(vec)) - for i, s := range vec { - entries[i] = s.String() - } - return strings.Join(entries, "\n") -} - -func (vec Vector) Len() int { return len(vec) } -func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] } - -// Less compares first the metrics, then the timestamp. -func (vec Vector) Less(i, j int) bool { - switch { - case vec[i].Metric.Before(vec[j].Metric): - return true - case vec[j].Metric.Before(vec[i].Metric): - return false - case vec[i].Timestamp.Before(vec[j].Timestamp): - return true - default: - return false - } -} - -// Equal compares two sets of samples and returns true if they are equal. -func (vec Vector) Equal(o Vector) bool { - if len(vec) != len(o) { - return false - } - - for i, sample := range vec { - if !sample.Equal(o[i]) { - return false - } - } - return true -} - -// Matrix is a list of time series. 
-type Matrix []*SampleStream - -func (m Matrix) Len() int { return len(m) } -func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) } -func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] } - -func (mat Matrix) String() string { - matCp := make(Matrix, len(mat)) - copy(matCp, mat) - sort.Sort(matCp) - - strs := make([]string, len(matCp)) - - for i, ss := range matCp { - strs[i] = ss.String() - } - - return strings.Join(strs, "\n") -} diff --git a/vendor/github.com/prometheus/procfs/.gitignore b/vendor/github.com/prometheus/procfs/.gitignore deleted file mode 100644 index 7cc33ae4a..000000000 --- a/vendor/github.com/prometheus/procfs/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -/testdata/fixtures/ -/fixtures diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml deleted file mode 100644 index a197699a1..000000000 --- a/vendor/github.com/prometheus/procfs/.golangci.yml +++ /dev/null @@ -1,12 +0,0 @@ ---- -linters: - enable: - - godot - - revive - -linter-settings: - godot: - capital: true - exclude: - # Ignore "See: URL" - - 'See:' diff --git a/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md b/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md deleted file mode 100644 index d325872bd..000000000 --- a/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,3 +0,0 @@ -# Prometheus Community Code of Conduct - -Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md). diff --git a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md deleted file mode 100644 index 853eb9d49..000000000 --- a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md +++ /dev/null @@ -1,121 +0,0 @@ -# Contributing - -Prometheus uses GitHub to manage reviews of pull requests. - -* If you are a new contributor see: [Steps to Contribute](#steps-to-contribute) - -* If you have a trivial fix or improvement, go ahead and create a pull request, - addressing (with `@...`) a suitable maintainer of this repository (see - [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request. - -* If you plan to do something more involved, first discuss your ideas - on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). - This will avoid unnecessary work and surely give you and us a good deal - of inspiration. Also please see our [non-goals issue](https://github.com/prometheus/docs/issues/149) on areas that the Prometheus community doesn't plan to work on. - -* Relevant coding style guidelines are the [Go Code Review - Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) - and the _Formatting and style_ section of Peter Bourgon's [Go: Best - Practices for Production - Environments](https://peter.bourgon.org/go-in-production/#formatting-and-style). - -* Be sure to sign off on the [DCO](https://github.com/probot/dco#how-it-works) - -## Steps to Contribute - -Should you wish to work on an issue, please claim it first by commenting on the GitHub issue that you want to work on it. This is to prevent duplicated efforts from contributors on the same issue. - -Please check the [`help-wanted`](https://github.com/prometheus/procfs/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) label to find issues that are good for getting started. 
If you have questions about one of the issues, with or without the tag, please comment on them and one of the maintainers will clarify it. For a quicker response, contact us over [IRC](https://prometheus.io/community). - -For quickly compiling and testing your changes do: -``` -make test # Make sure all the tests pass before you commit and push :) -``` - -We use [`golangci-lint`](https://github.com/golangci/golangci-lint) for linting the code. If it reports an issue and you think that the warning needs to be disregarded or is a false-positive, you can add a special comment `//nolint:linter1[,linter2,...]` before the offending line. Use this sparingly though, fixing the code to comply with the linter's recommendation is in general the preferred course of action. - -## Pull Request Checklist - -* Branch from the master branch and, if needed, rebase to the current master branch before submitting your pull request. If it doesn't merge cleanly with master you may be asked to rebase your changes. - -* Commits should be as small as possible, while ensuring that each commit is correct independently (i.e., each commit should compile and pass tests). - -* If your patch is not getting reviewed or you need a specific person to review it, you can @-reply a reviewer asking for a review in the pull request or a comment, or you can ask for a review on IRC channel [#prometheus](https://webchat.freenode.net/?channels=#prometheus) on irc.freenode.net (for the easiest start, [join via Riot](https://riot.im/app/#/room/#prometheus:matrix.org)). - -* Add tests relevant to the fixed bug or new feature. - -## Dependency management - -The Prometheus project uses [Go modules](https://golang.org/cmd/go/#hdr-Modules__module_versions__and_more) to manage dependencies on external packages. This requires a working Go environment with version 1.12 or greater installed. - -All dependencies are vendored in the `vendor/` directory. - -To add or update a new dependency, use the `go get` command: - -```bash -# Pick the latest tagged release. -go get example.com/some/module/pkg - -# Pick a specific version. -go get example.com/some/module/pkg@vX.Y.Z -``` - -Tidy up the `go.mod` and `go.sum` files and copy the new/updated dependency to the `vendor/` directory: - - -```bash -# The GO111MODULE variable can be omitted when the code isn't located in GOPATH. -GO111MODULE=on go mod tidy - -GO111MODULE=on go mod vendor -``` - -You have to commit the changes to `go.mod`, `go.sum` and the `vendor/` directory before submitting the pull request. - - -## API Implementation Guidelines - -### Naming and Documentation - -Public functions and structs should normally be named according to the file(s) being read and parsed. For example, -the `fs.BuddyInfo()` function reads the file `/proc/buddyinfo`. In addition, the godoc for each public function -should contain the path to the file(s) being read and a URL of the linux kernel documentation describing the file(s). - -### Reading vs. Parsing - -Most functionality in this library consists of reading files and then parsing the text into structured data. In most -cases reading and parsing should be separated into different functions/methods with a public `fs.Thing()` method and -a private `parseThing(r Reader)` function. This provides a logical separation and allows parsing to be tested -directly without the need to read from the filesystem. Using a `Reader` argument is preferred over other data types -such as `string` or `*File` because it provides the most flexibility regarding the data source. 
When a set of files -in a directory needs to be parsed, then a `path` string parameter to the parse function can be used instead. - -### /proc and /sys filesystem I/O - -The `proc` and `sys` filesystems are pseudo file systems and work a bit differently from standard disk I/O. -Many of the files are changing continuously and the data being read can in some cases change between subsequent -reads in the same file. Also, most of the files are relatively small (less than a few KBs), and system calls -to the `stat` function will often return the wrong size. Therefore, for most files it's recommended to read the -full file in a single operation using an internal utility function called `util.ReadFileNoStat`. -This function is similar to `os.ReadFile`, but it avoids the system call to `stat` to get the current size of -the file. - -Note that parsing the file's contents can still be performed one line at a time. This is done by first reading -the full file, and then using a scanner on the `[]byte` or `string` containing the data. - -``` - data, err := util.ReadFileNoStat("/proc/cpuinfo") - if err != nil { - return err - } - reader := bytes.NewReader(data) - scanner := bufio.NewScanner(reader) -``` - -The `/sys` filesystem contains many very small files which contain only a single numeric or text value. These files -can be read using an internal function called `util.SysReadFile` which is similar to `os.ReadFile` but does -not bother to check the size of the file before reading. -``` - data, err := util.SysReadFile("/sys/class/power_supply/BAT0/capacity") -``` - diff --git a/vendor/github.com/prometheus/procfs/LICENSE b/vendor/github.com/prometheus/procfs/LICENSE deleted file mode 100644 index 261eeb9e9..000000000 --- a/vendor/github.com/prometheus/procfs/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/prometheus/procfs/MAINTAINERS.md b/vendor/github.com/prometheus/procfs/MAINTAINERS.md deleted file mode 100644 index 56ba67d3e..000000000 --- a/vendor/github.com/prometheus/procfs/MAINTAINERS.md +++ /dev/null @@ -1,2 +0,0 @@ -* Johannes 'fish' Ziemke @discordianfish -* Paul Gier @pgier diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile deleted file mode 100644 index 7edfe4d09..000000000 --- a/vendor/github.com/prometheus/procfs/Makefile +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright 2018 The Prometheus Authors -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -include Makefile.common - -%/.unpacked: %.ttar - @echo ">> extracting fixtures $*" - ./ttar -C $(dir $*) -x -f $*.ttar - touch $@ - -fixtures: testdata/fixtures/.unpacked - -update_fixtures: - rm -vf testdata/fixtures/.unpacked - ./ttar -c -f testdata/fixtures.ttar -C testdata/ fixtures/ - -.PHONY: build -build: - -.PHONY: test -test: testdata/fixtures/.unpacked common-test diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common deleted file mode 100644 index e358db69c..000000000 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ /dev/null @@ -1,267 +0,0 @@ -# Copyright 2018 The Prometheus Authors -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# A common Makefile that includes rules to be reused in different prometheus projects. -# !!! Open PRs only against the prometheus/prometheus/Makefile.common repository! - -# Example usage : -# Create the main Makefile in the root project directory. -# include Makefile.common -# customTarget: -# @echo ">> Running customTarget" -# - -# Ensure GOBIN is not set during build so that promu is installed to the correct path -unexport GOBIN - -GO ?= go -GOFMT ?= $(GO)fmt -FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) -GOOPTS ?= -GOHOSTOS ?= $(shell $(GO) env GOHOSTOS) -GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH) - -GO_VERSION ?= $(shell $(GO) version) -GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) -PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') - -PROMU := $(FIRST_GOPATH)/bin/promu -pkgs = ./... - -ifeq (arm, $(GOHOSTARCH)) - GOHOSTARM ?= $(shell GOARM= $(GO) env GOARM) - GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM) -else - GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH) -endif - -GOTEST := $(GO) test -GOTEST_DIR := -ifneq ($(CIRCLE_JOB),) -ifneq ($(shell which gotestsum),) - GOTEST_DIR := test-results - GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml -- -endif -endif - -PROMU_VERSION ?= 0.14.0 -PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz - -SKIP_GOLANGCI_LINT := -GOLANGCI_LINT := -GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.49.0 -# golangci-lint only supports linux, darwin and windows platforms on i386/amd64. -# windows isn't included here because of the path separator being different. 
-ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) - ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386)) - # If we're in CI and there is an Actions file, that means the linter - # is being run in Actions, so we don't need to run it here. - ifneq (,$(SKIP_GOLANGCI_LINT)) - GOLANGCI_LINT := - else ifeq (,$(CIRCLE_JOB)) - GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint - else ifeq (,$(wildcard .github/workflows/golangci-lint.yml)) - GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint - endif - endif -endif - -PREFIX ?= $(shell pwd) -BIN_DIR ?= $(shell pwd) -DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) -DOCKERFILE_PATH ?= ./Dockerfile -DOCKERBUILD_CONTEXT ?= ./ -DOCKER_REPO ?= prom - -DOCKER_ARCHS ?= amd64 - -BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS)) -PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS)) -TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS)) - -ifeq ($(GOHOSTARCH),amd64) - ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows)) - # Only supported on amd64 - test-flags := -race - endif -endif - -# This rule is used to forward a target like "build" to "common-build". This -# allows a new "build" target to be defined in a Makefile which includes this -# one and override "common-build" without override warnings. -%: common-% ; - -.PHONY: common-all -common-all: precheck style check_license lint yamllint unused build test - -.PHONY: common-style -common-style: - @echo ">> checking code style" - @fmtRes=$$($(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print)); \ - if [ -n "$${fmtRes}" ]; then \ - echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \ - echo "Please ensure you are using $$($(GO) version) for formatting code."; \ - exit 1; \ - fi - -.PHONY: common-check_license -common-check_license: - @echo ">> checking license header" - @licRes=$$(for file in $$(find . -type f -iname '*.go' ! -path './vendor/*') ; do \ - awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \ - done); \ - if [ -n "$${licRes}" ]; then \ - echo "license header checking failed:"; echo "$${licRes}"; \ - exit 1; \ - fi - -.PHONY: common-deps -common-deps: - @echo ">> getting dependencies" - $(GO) mod download - -.PHONY: update-go-deps -update-go-deps: - @echo ">> updating Go dependencies" - @for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \ - $(GO) get -d $$m; \ - done - $(GO) mod tidy - -.PHONY: common-test-short -common-test-short: $(GOTEST_DIR) - @echo ">> running short tests" - $(GOTEST) -short $(GOOPTS) $(pkgs) - -.PHONY: common-test -common-test: $(GOTEST_DIR) - @echo ">> running all tests" - $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs) - -$(GOTEST_DIR): - @mkdir -p $@ - -.PHONY: common-format -common-format: - @echo ">> formatting code" - $(GO) fmt $(pkgs) - -.PHONY: common-vet -common-vet: - @echo ">> vetting code" - $(GO) vet $(GOOPTS) $(pkgs) - -.PHONY: common-lint -common-lint: $(GOLANGCI_LINT) -ifdef GOLANGCI_LINT - @echo ">> running golangci-lint" -# 'go list' needs to be executed before staticcheck to prepopulate the modules cache. -# Otherwise staticcheck might fail randomly for some reason not yet explained. - $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... 
> /dev/null - $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs) -endif - -.PHONY: common-yamllint -common-yamllint: - @echo ">> running yamllint on all YAML files in the repository" -ifeq (, $(shell which yamllint)) - @echo "yamllint not installed so skipping" -else - yamllint . -endif - -# For backward-compatibility. -.PHONY: common-staticcheck -common-staticcheck: lint - -.PHONY: common-unused -common-unused: - @echo ">> running check for unused/missing packages in go.mod" - $(GO) mod tidy - @git diff --exit-code -- go.sum go.mod - -.PHONY: common-build -common-build: promu - @echo ">> building binaries" - $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES) - -.PHONY: common-tarball -common-tarball: promu - @echo ">> building release tarball" - $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) - -.PHONY: common-docker $(BUILD_DOCKER_ARCHS) -common-docker: $(BUILD_DOCKER_ARCHS) -$(BUILD_DOCKER_ARCHS): common-docker-%: - docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \ - -f $(DOCKERFILE_PATH) \ - --build-arg ARCH="$*" \ - --build-arg OS="linux" \ - $(DOCKERBUILD_CONTEXT) - -.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS) -common-docker-publish: $(PUBLISH_DOCKER_ARCHS) -$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%: - docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" - -DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION))) -.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS) -common-docker-tag-latest: $(TAG_DOCKER_ARCHS) -$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%: - docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" - docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)" - -.PHONY: common-docker-manifest -common-docker-manifest: - DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG)) - DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" - -.PHONY: promu -promu: $(PROMU) - -$(PROMU): - $(eval PROMU_TMP := $(shell mktemp -d)) - curl -s -L $(PROMU_URL) | tar -xvzf - -C $(PROMU_TMP) - mkdir -p $(FIRST_GOPATH)/bin - cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu - rm -r $(PROMU_TMP) - -.PHONY: proto -proto: - @echo ">> generating code from proto files" - @./scripts/genproto.sh - -ifdef GOLANGCI_LINT -$(GOLANGCI_LINT): - mkdir -p $(FIRST_GOPATH)/bin - curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/$(GOLANGCI_LINT_VERSION)/install.sh \ - | sed -e '/install -d/d' \ - | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION) -endif - -.PHONY: precheck -precheck:: - -define PRECHECK_COMMAND_template = -precheck:: $(1)_precheck - -PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1))) -.PHONY: $(1)_precheck -$(1)_precheck: - @if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \ - echo "Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. 
Is $(1) installed?"; \ - exit 1; \ - fi -endef diff --git a/vendor/github.com/prometheus/procfs/NOTICE b/vendor/github.com/prometheus/procfs/NOTICE deleted file mode 100644 index 53c5e9aa1..000000000 --- a/vendor/github.com/prometheus/procfs/NOTICE +++ /dev/null @@ -1,7 +0,0 @@ -procfs provides functions to retrieve system, kernel and process -metrics from the pseudo-filesystem proc. - -Copyright 2014-2015 The Prometheus Authors - -This product includes software developed at -SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md deleted file mode 100644 index 43c37735a..000000000 --- a/vendor/github.com/prometheus/procfs/README.md +++ /dev/null @@ -1,61 +0,0 @@ -# procfs - -This package provides functions to retrieve system, kernel, and process -metrics from the pseudo-filesystems /proc and /sys. - -*WARNING*: This package is a work in progress. Its API may still break in -backwards-incompatible ways without warnings. Use it at your own risk. - -[![Go Reference](https://pkg.go.dev/badge/github.com/prometheus/procfs.svg)](https://pkg.go.dev/github.com/prometheus/procfs) -[![CircleCI](https://circleci.com/gh/prometheus/procfs/tree/master.svg?style=svg)](https://circleci.com/gh/prometheus/procfs/tree/master) -[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs) - -## Usage - -The procfs library is organized by packages based on whether the gathered data is coming from -/proc, /sys, or both. Each package contains an `FS` type which represents the path to either /proc, -/sys, or both. For example, cpu statistics are gathered from -`/proc/stat` and are available via the root procfs package. First, the proc filesystem mount -point is initialized, and then the stat information is read. - -```go -fs, err := procfs.NewFS("/proc") -stats, err := fs.Stat() -``` - -Some sub-packages such as `blockdevice`, require access to both the proc and sys filesystems. - -```go - fs, err := blockdevice.NewFS("/proc", "/sys") - stats, err := fs.ProcDiskstats() -``` - -## Package Organization - -The packages in this project are organized according to (1) whether the data comes from the `/proc` or -`/sys` filesystem and (2) the type of information being retrieved. For example, most process information -can be gathered from the functions in the root `procfs` package. Information about block devices such as disk drives -is available in the `blockdevices` sub-package. - -## Building and Testing - -The procfs library is intended to be built as part of another application, so there are no distributable binaries. -However, most of the API includes unit tests which can be run with `make test`. - -### Updating Test Fixtures - -The procfs library includes a set of test fixtures which include many example files from -the `/proc` and `/sys` filesystems. These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file -which is extracted automatically during testing. To add/update the test fixtures, first -ensure the `fixtures` directory is up to date by removing the existing directory and then -extracting the ttar file using `make fixtures/.unpacked` or just `make test`. - -```bash -rm -rf fixtures -make test -``` - -Next, make the required changes to the extracted files in the `fixtures` directory. 
When -the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file -based on the updated `fixtures` directory. And finally, verify the changes using -`git diff fixtures.ttar`. diff --git a/vendor/github.com/prometheus/procfs/SECURITY.md b/vendor/github.com/prometheus/procfs/SECURITY.md deleted file mode 100644 index fed02d85c..000000000 --- a/vendor/github.com/prometheus/procfs/SECURITY.md +++ /dev/null @@ -1,6 +0,0 @@ -# Reporting a security issue - -The Prometheus security policy, including how to report vulnerabilities, can be -found here: - - diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go deleted file mode 100644 index 68f36e888..000000000 --- a/vendor/github.com/prometheus/procfs/arp.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "fmt" - "net" - "os" - "strconv" - "strings" -) - -// Learned from include/uapi/linux/if_arp.h. -const ( - // completed entry (ha valid). - ATFComplete = 0x02 - // permanent entry. - ATFPermanent = 0x04 - // Publish entry. - ATFPublish = 0x08 - // Has requested trailers. - ATFUseTrailers = 0x10 - // Obsoleted: Want to use a netmask (only for proxy entries). - ATFNetmask = 0x20 - // Don't answer this addresses. - ATFDontPublish = 0x40 -) - -// ARPEntry contains a single row of the columnar data represented in -// /proc/net/arp. -type ARPEntry struct { - // IP address - IPAddr net.IP - // MAC address - HWAddr net.HardwareAddr - // Name of the device - Device string - // Flags - Flags byte -} - -// GatherARPEntries retrieves all the ARP entries, parse the relevant columns, -// and then return a slice of ARPEntry's. 
-func (fs FS) GatherARPEntries() ([]ARPEntry, error) { - data, err := os.ReadFile(fs.proc.Path("net/arp")) - if err != nil { - return nil, fmt.Errorf("error reading arp %q: %w", fs.proc.Path("net/arp"), err) - } - - return parseARPEntries(data) -} - -func parseARPEntries(data []byte) ([]ARPEntry, error) { - lines := strings.Split(string(data), "\n") - entries := make([]ARPEntry, 0) - var err error - const ( - expectedDataWidth = 6 - expectedHeaderWidth = 9 - ) - for _, line := range lines { - columns := strings.Fields(line) - width := len(columns) - - if width == expectedHeaderWidth || width == 0 { - continue - } else if width == expectedDataWidth { - entry, err := parseARPEntry(columns) - if err != nil { - return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %w", err) - } - entries = append(entries, entry) - } else { - return []ARPEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedDataWidth) - } - - } - - return entries, err -} - -func parseARPEntry(columns []string) (ARPEntry, error) { - entry := ARPEntry{Device: columns[5]} - ip := net.ParseIP(columns[0]) - entry.IPAddr = ip - - if mac, err := net.ParseMAC(columns[3]); err == nil { - entry.HWAddr = mac - } else { - return ARPEntry{}, err - } - - if flags, err := strconv.ParseUint(columns[2], 0, 8); err == nil { - entry.Flags = byte(flags) - } else { - return ARPEntry{}, err - } - - return entry, nil -} - -// IsComplete returns true if ARP entry is marked with complete flag. -func (entry *ARPEntry) IsComplete() bool { - return entry.Flags&ATFComplete != 0 -} diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go deleted file mode 100644 index f5b7939b2..000000000 --- a/vendor/github.com/prometheus/procfs/buddyinfo.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "fmt" - "io" - "os" - "strconv" - "strings" -) - -// A BuddyInfo is the details parsed from /proc/buddyinfo. -// The data is comprised of an array of free fragments of each size. -// The sizes are 2^n*PAGE_SIZE, where n is the array index. -type BuddyInfo struct { - Node string - Zone string - Sizes []float64 -} - -// BuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem. 
-func (fs FS) BuddyInfo() ([]BuddyInfo, error) { - file, err := os.Open(fs.proc.Path("buddyinfo")) - if err != nil { - return nil, err - } - defer file.Close() - - return parseBuddyInfo(file) -} - -func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { - var ( - buddyInfo = []BuddyInfo{} - scanner = bufio.NewScanner(r) - bucketCount = -1 - ) - - for scanner.Scan() { - var err error - line := scanner.Text() - parts := strings.Fields(line) - - if len(parts) < 4 { - return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo") - } - - node := strings.TrimRight(parts[1], ",") - zone := strings.TrimRight(parts[3], ",") - arraySize := len(parts[4:]) - - if bucketCount == -1 { - bucketCount = arraySize - } else { - if bucketCount != arraySize { - return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize) - } - } - - sizes := make([]float64, arraySize) - for i := 0; i < arraySize; i++ { - sizes[i], err = strconv.ParseFloat(parts[i+4], 64) - if err != nil { - return nil, fmt.Errorf("invalid value in buddyinfo: %w", err) - } - } - - buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes}) - } - - return buddyInfo, scanner.Err() -} diff --git a/vendor/github.com/prometheus/procfs/cmdline.go b/vendor/github.com/prometheus/procfs/cmdline.go deleted file mode 100644 index bf4f3b48c..000000000 --- a/vendor/github.com/prometheus/procfs/cmdline.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// CmdLine returns the command line of the kernel. -func (fs FS) CmdLine() ([]string, error) { - data, err := util.ReadFileNoStat(fs.proc.Path("cmdline")) - if err != nil { - return nil, err - } - - return strings.Fields(string(data)), nil -} diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go deleted file mode 100644 index 06968ca2e..000000000 --- a/vendor/github.com/prometheus/procfs/cpuinfo.go +++ /dev/null @@ -1,518 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build linux -// +build linux - -package procfs - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "regexp" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// CPUInfo contains general information about a system CPU found in /proc/cpuinfo. 
-type CPUInfo struct { - Processor uint - VendorID string - CPUFamily string - Model string - ModelName string - Stepping string - Microcode string - CPUMHz float64 - CacheSize string - PhysicalID string - Siblings uint - CoreID string - CPUCores uint - APICID string - InitialAPICID string - FPU string - FPUException string - CPUIDLevel uint - WP string - Flags []string - Bugs []string - BogoMips float64 - CLFlushSize uint - CacheAlignment uint - AddressSizes string - PowerManagement string -} - -var ( - cpuinfoClockRegexp = regexp.MustCompile(`([\d.]+)`) - cpuinfoS390XProcessorRegexp = regexp.MustCompile(`^processor\s+(\d+):.*`) -) - -// CPUInfo returns information about current system CPUs. -// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt -func (fs FS) CPUInfo() ([]CPUInfo, error) { - data, err := util.ReadFileNoStat(fs.proc.Path("cpuinfo")) - if err != nil { - return nil, err - } - return parseCPUInfo(data) -} - -func parseCPUInfoX86(info []byte) ([]CPUInfo, error) { - scanner := bufio.NewScanner(bytes.NewReader(info)) - - // find the first "processor" line - firstLine := firstNonEmptyLine(scanner) - if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) - } - field := strings.SplitN(firstLine, ": ", 2) - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - firstcpu := CPUInfo{Processor: uint(v)} - cpuinfo := []CPUInfo{firstcpu} - i := 0 - - for scanner.Scan() { - line := scanner.Text() - if !strings.Contains(line, ":") { - continue - } - field := strings.SplitN(line, ": ", 2) - switch strings.TrimSpace(field[0]) { - case "processor": - cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor - i++ - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].Processor = uint(v) - case "vendor", "vendor_id": - cpuinfo[i].VendorID = field[1] - case "cpu family": - cpuinfo[i].CPUFamily = field[1] - case "model": - cpuinfo[i].Model = field[1] - case "model name": - cpuinfo[i].ModelName = field[1] - case "stepping": - cpuinfo[i].Stepping = field[1] - case "microcode": - cpuinfo[i].Microcode = field[1] - case "cpu MHz": - v, err := strconv.ParseFloat(field[1], 64) - if err != nil { - return nil, err - } - cpuinfo[i].CPUMHz = v - case "cache size": - cpuinfo[i].CacheSize = field[1] - case "physical id": - cpuinfo[i].PhysicalID = field[1] - case "siblings": - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].Siblings = uint(v) - case "core id": - cpuinfo[i].CoreID = field[1] - case "cpu cores": - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].CPUCores = uint(v) - case "apicid": - cpuinfo[i].APICID = field[1] - case "initial apicid": - cpuinfo[i].InitialAPICID = field[1] - case "fpu": - cpuinfo[i].FPU = field[1] - case "fpu_exception": - cpuinfo[i].FPUException = field[1] - case "cpuid level": - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].CPUIDLevel = uint(v) - case "wp": - cpuinfo[i].WP = field[1] - case "flags": - cpuinfo[i].Flags = strings.Fields(field[1]) - case "bugs": - cpuinfo[i].Bugs = strings.Fields(field[1]) - case "bogomips": - v, err := strconv.ParseFloat(field[1], 64) - if err != nil { - return nil, err - } - cpuinfo[i].BogoMips = v - case "clflush size": - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err 
- } - cpuinfo[i].CLFlushSize = uint(v) - case "cache_alignment": - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].CacheAlignment = uint(v) - case "address sizes": - cpuinfo[i].AddressSizes = field[1] - case "power management": - cpuinfo[i].PowerManagement = field[1] - } - } - return cpuinfo, nil -} - -func parseCPUInfoARM(info []byte) ([]CPUInfo, error) { - scanner := bufio.NewScanner(bytes.NewReader(info)) - - firstLine := firstNonEmptyLine(scanner) - match, _ := regexp.MatchString("^[Pp]rocessor", firstLine) - if !match || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) - } - field := strings.SplitN(firstLine, ": ", 2) - cpuinfo := []CPUInfo{} - featuresLine := "" - commonCPUInfo := CPUInfo{} - i := 0 - if strings.TrimSpace(field[0]) == "Processor" { - commonCPUInfo = CPUInfo{ModelName: field[1]} - i = -1 - } else { - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - firstcpu := CPUInfo{Processor: uint(v)} - cpuinfo = []CPUInfo{firstcpu} - } - - for scanner.Scan() { - line := scanner.Text() - if !strings.Contains(line, ":") { - continue - } - field := strings.SplitN(line, ": ", 2) - switch strings.TrimSpace(field[0]) { - case "processor": - cpuinfo = append(cpuinfo, commonCPUInfo) // start of the next processor - i++ - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].Processor = uint(v) - case "BogoMIPS": - if i == -1 { - cpuinfo = append(cpuinfo, commonCPUInfo) // There is only one processor - i++ - cpuinfo[i].Processor = 0 - } - v, err := strconv.ParseFloat(field[1], 64) - if err != nil { - return nil, err - } - cpuinfo[i].BogoMips = v - case "Features": - featuresLine = line - case "model name": - cpuinfo[i].ModelName = field[1] - } - } - fields := strings.SplitN(featuresLine, ": ", 2) - for i := range cpuinfo { - cpuinfo[i].Flags = strings.Fields(fields[1]) - } - return cpuinfo, nil - -} - -func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) { - scanner := bufio.NewScanner(bytes.NewReader(info)) - - firstLine := firstNonEmptyLine(scanner) - if !strings.HasPrefix(firstLine, "vendor_id") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) - } - field := strings.SplitN(firstLine, ": ", 2) - cpuinfo := []CPUInfo{} - commonCPUInfo := CPUInfo{VendorID: field[1]} - - for scanner.Scan() { - line := scanner.Text() - if !strings.Contains(line, ":") { - continue - } - field := strings.SplitN(line, ": ", 2) - switch strings.TrimSpace(field[0]) { - case "bogomips per cpu": - v, err := strconv.ParseFloat(field[1], 64) - if err != nil { - return nil, err - } - commonCPUInfo.BogoMips = v - case "features": - commonCPUInfo.Flags = strings.Fields(field[1]) - } - if strings.HasPrefix(line, "processor") { - match := cpuinfoS390XProcessorRegexp.FindStringSubmatch(line) - if len(match) < 2 { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) - } - cpu := commonCPUInfo - v, err := strconv.ParseUint(match[1], 0, 32) - if err != nil { - return nil, err - } - cpu.Processor = uint(v) - cpuinfo = append(cpuinfo, cpu) - } - if strings.HasPrefix(line, "cpu number") { - break - } - } - - i := 0 - for scanner.Scan() { - line := scanner.Text() - if !strings.Contains(line, ":") { - continue - } - field := strings.SplitN(line, ": ", 2) - switch strings.TrimSpace(field[0]) { - case "cpu number": - i++ - case "cpu MHz dynamic": - clock := 
cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1])) - v, err := strconv.ParseFloat(clock, 64) - if err != nil { - return nil, err - } - cpuinfo[i].CPUMHz = v - case "physical id": - cpuinfo[i].PhysicalID = field[1] - case "core id": - cpuinfo[i].CoreID = field[1] - case "cpu cores": - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].CPUCores = uint(v) - case "siblings": - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].Siblings = uint(v) - } - } - - return cpuinfo, nil -} - -func parseCPUInfoMips(info []byte) ([]CPUInfo, error) { - scanner := bufio.NewScanner(bytes.NewReader(info)) - - // find the first "processor" line - firstLine := firstNonEmptyLine(scanner) - if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) - } - field := strings.SplitN(firstLine, ": ", 2) - cpuinfo := []CPUInfo{} - systemType := field[1] - - i := 0 - - for scanner.Scan() { - line := scanner.Text() - if !strings.Contains(line, ":") { - continue - } - field := strings.SplitN(line, ": ", 2) - switch strings.TrimSpace(field[0]) { - case "processor": - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - i = int(v) - cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor - cpuinfo[i].Processor = uint(v) - cpuinfo[i].VendorID = systemType - case "cpu model": - cpuinfo[i].ModelName = field[1] - case "BogoMIPS": - v, err := strconv.ParseFloat(field[1], 64) - if err != nil { - return nil, err - } - cpuinfo[i].BogoMips = v - } - } - return cpuinfo, nil -} - -func parseCPUInfoLoong(info []byte) ([]CPUInfo, error) { - scanner := bufio.NewScanner(bytes.NewReader(info)) - // find the first "processor" line - firstLine := firstNonEmptyLine(scanner) - if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") { - return nil, errors.New("invalid cpuinfo file: " + firstLine) - } - field := strings.SplitN(firstLine, ": ", 2) - cpuinfo := []CPUInfo{} - systemType := field[1] - i := 0 - for scanner.Scan() { - line := scanner.Text() - if !strings.Contains(line, ":") { - continue - } - field := strings.SplitN(line, ": ", 2) - switch strings.TrimSpace(field[0]) { - case "processor": - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - i = int(v) - cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor - cpuinfo[i].Processor = uint(v) - cpuinfo[i].VendorID = systemType - case "CPU Family": - cpuinfo[i].CPUFamily = field[1] - case "Model Name": - cpuinfo[i].ModelName = field[1] - } - } - return cpuinfo, nil -} - -func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) { - scanner := bufio.NewScanner(bytes.NewReader(info)) - - firstLine := firstNonEmptyLine(scanner) - if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) - } - field := strings.SplitN(firstLine, ": ", 2) - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - firstcpu := CPUInfo{Processor: uint(v)} - cpuinfo := []CPUInfo{firstcpu} - i := 0 - - for scanner.Scan() { - line := scanner.Text() - if !strings.Contains(line, ":") { - continue - } - field := strings.SplitN(line, ": ", 2) - switch strings.TrimSpace(field[0]) { - case "processor": - cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor - i++ - v, err := 
strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].Processor = uint(v) - case "cpu": - cpuinfo[i].VendorID = field[1] - case "clock": - clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1])) - v, err := strconv.ParseFloat(clock, 64) - if err != nil { - return nil, err - } - cpuinfo[i].CPUMHz = v - } - } - return cpuinfo, nil -} - -func parseCPUInfoRISCV(info []byte) ([]CPUInfo, error) { - scanner := bufio.NewScanner(bytes.NewReader(info)) - - firstLine := firstNonEmptyLine(scanner) - if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") { - return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine) - } - field := strings.SplitN(firstLine, ": ", 2) - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - firstcpu := CPUInfo{Processor: uint(v)} - cpuinfo := []CPUInfo{firstcpu} - i := 0 - - for scanner.Scan() { - line := scanner.Text() - if !strings.Contains(line, ":") { - continue - } - field := strings.SplitN(line, ": ", 2) - switch strings.TrimSpace(field[0]) { - case "processor": - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - i = int(v) - cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor - cpuinfo[i].Processor = uint(v) - case "hart": - cpuinfo[i].CoreID = field[1] - case "isa": - cpuinfo[i].ModelName = field[1] - } - } - return cpuinfo, nil -} - -func parseCPUInfoDummy(_ []byte) ([]CPUInfo, error) { // nolint:unused,deadcode - return nil, errors.New("not implemented") -} - -// firstNonEmptyLine advances the scanner to the first non-empty line -// and returns the contents of that line. -func firstNonEmptyLine(scanner *bufio.Scanner) string { - for scanner.Scan() { - line := scanner.Text() - if strings.TrimSpace(line) != "" { - return line - } - } - return "" -} diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_armx.go b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go deleted file mode 100644 index 64cfd534c..000000000 --- a/vendor/github.com/prometheus/procfs/cpuinfo_armx.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build linux && (arm || arm64) -// +build linux -// +build arm arm64 - -package procfs - -var parseCPUInfo = parseCPUInfoARM diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go deleted file mode 100644 index d88442f0e..000000000 --- a/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2022 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build linux -// +build linux - -package procfs - -var parseCPUInfo = parseCPUInfoLoong diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go deleted file mode 100644 index c11207f3a..000000000 --- a/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build linux && (mips || mipsle || mips64 || mips64le) -// +build linux -// +build mips mipsle mips64 mips64le - -package procfs - -var parseCPUInfo = parseCPUInfoMips diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_others.go b/vendor/github.com/prometheus/procfs/cpuinfo_others.go deleted file mode 100644 index a6b2b3127..000000000 --- a/vendor/github.com/prometheus/procfs/cpuinfo_others.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build linux && !386 && !amd64 && !arm && !arm64 && !loong64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x -// +build linux,!386,!amd64,!arm,!arm64,!loong64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x - -package procfs - -var parseCPUInfo = parseCPUInfoDummy diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go deleted file mode 100644 index 003bc2ad4..000000000 --- a/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build linux && (ppc64 || ppc64le) -// +build linux -// +build ppc64 ppc64le - -package procfs - -var parseCPUInfo = parseCPUInfoPPC diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go deleted file mode 100644 index 1c9b7313b..000000000 --- a/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build linux && (riscv || riscv64) -// +build linux -// +build riscv riscv64 - -package procfs - -var parseCPUInfo = parseCPUInfoRISCV diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go deleted file mode 100644 index fa3686bc0..000000000 --- a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build linux -// +build linux - -package procfs - -var parseCPUInfo = parseCPUInfoS390X diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_x86.go b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go deleted file mode 100644 index a0ef55562..000000000 --- a/vendor/github.com/prometheus/procfs/cpuinfo_x86.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//go:build linux && (386 || amd64) -// +build linux -// +build 386 amd64 - -package procfs - -var parseCPUInfo = parseCPUInfoX86 diff --git a/vendor/github.com/prometheus/procfs/crypto.go b/vendor/github.com/prometheus/procfs/crypto.go deleted file mode 100644 index 5048ad1f2..000000000 --- a/vendor/github.com/prometheus/procfs/crypto.go +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// Crypto holds info parsed from /proc/crypto. -type Crypto struct { - Alignmask *uint64 - Async bool - Blocksize *uint64 - Chunksize *uint64 - Ctxsize *uint64 - Digestsize *uint64 - Driver string - Geniv string - Internal string - Ivsize *uint64 - Maxauthsize *uint64 - MaxKeysize *uint64 - MinKeysize *uint64 - Module string - Name string - Priority *int64 - Refcnt *int64 - Seedsize *uint64 - Selftest string - Type string - Walksize *uint64 -} - -// Crypto parses an crypto-file (/proc/crypto) and returns a slice of -// structs containing the relevant info. More information available here: -// https://kernel.readthedocs.io/en/sphinx-samples/crypto-API.html -func (fs FS) Crypto() ([]Crypto, error) { - path := fs.proc.Path("crypto") - b, err := util.ReadFileNoStat(path) - if err != nil { - return nil, fmt.Errorf("error reading crypto %q: %w", path, err) - } - - crypto, err := parseCrypto(bytes.NewReader(b)) - if err != nil { - return nil, fmt.Errorf("error parsing crypto %q: %w", path, err) - } - - return crypto, nil -} - -// parseCrypto parses a /proc/crypto stream into Crypto elements. -func parseCrypto(r io.Reader) ([]Crypto, error) { - var out []Crypto - - s := bufio.NewScanner(r) - for s.Scan() { - text := s.Text() - switch { - case strings.HasPrefix(text, "name"): - // Each crypto element begins with its name. - out = append(out, Crypto{}) - case text == "": - continue - } - - kv := strings.Split(text, ":") - if len(kv) != 2 { - return nil, fmt.Errorf("malformed crypto line: %q", text) - } - - k := strings.TrimSpace(kv[0]) - v := strings.TrimSpace(kv[1]) - - // Parse the key/value pair into the currently focused element. - c := &out[len(out)-1] - if err := c.parseKV(k, v); err != nil { - return nil, err - } - } - - if err := s.Err(); err != nil { - return nil, err - } - - return out, nil -} - -// parseKV parses a key/value pair into the appropriate field of c. -func (c *Crypto) parseKV(k, v string) error { - vp := util.NewValueParser(v) - - switch k { - case "async": - // Interpret literal yes as true. 
- c.Async = v == "yes" - case "blocksize": - c.Blocksize = vp.PUInt64() - case "chunksize": - c.Chunksize = vp.PUInt64() - case "digestsize": - c.Digestsize = vp.PUInt64() - case "driver": - c.Driver = v - case "geniv": - c.Geniv = v - case "internal": - c.Internal = v - case "ivsize": - c.Ivsize = vp.PUInt64() - case "maxauthsize": - c.Maxauthsize = vp.PUInt64() - case "max keysize": - c.MaxKeysize = vp.PUInt64() - case "min keysize": - c.MinKeysize = vp.PUInt64() - case "module": - c.Module = v - case "name": - c.Name = v - case "priority": - c.Priority = vp.PInt64() - case "refcnt": - c.Refcnt = vp.PInt64() - case "seedsize": - c.Seedsize = vp.PUInt64() - case "selftest": - c.Selftest = v - case "type": - c.Type = v - case "walksize": - c.Walksize = vp.PUInt64() - } - - return vp.Err() -} diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go deleted file mode 100644 index f9d961e44..000000000 --- a/vendor/github.com/prometheus/procfs/doc.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2014 Prometheus Team -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package procfs provides functions to retrieve system, kernel and process -// metrics from the pseudo-filesystem proc. -// -// Example: -// -// package main -// -// import ( -// "fmt" -// "log" -// -// "github.com/prometheus/procfs" -// ) -// -// func main() { -// p, err := procfs.Self() -// if err != nil { -// log.Fatalf("could not get process: %s", err) -// } -// -// stat, err := p.Stat() -// if err != nil { -// log.Fatalf("could not get process stat: %s", err) -// } -// -// fmt.Printf("command: %s\n", stat.Comm) -// fmt.Printf("cpu time: %fs\n", stat.CPUTime()) -// fmt.Printf("vsize: %dB\n", stat.VirtualMemory()) -// fmt.Printf("rss: %dB\n", stat.ResidentMemory()) -// } -package procfs diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go deleted file mode 100644 index 0102ab0fd..000000000 --- a/vendor/github.com/prometheus/procfs/fs.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "github.com/prometheus/procfs/internal/fs" -) - -// FS represents the pseudo-filesystem sys, which provides an interface to -// kernel data structures. -type FS struct { - proc fs.FS -} - -// DefaultMountPoint is the common mount point of the proc filesystem. 
-const DefaultMountPoint = fs.DefaultProcMountPoint - -// NewDefaultFS returns a new proc FS mounted under the default proc mountPoint. -// It will error if the mount point directory can't be read or is a file. -func NewDefaultFS() (FS, error) { - return NewFS(DefaultMountPoint) -} - -// NewFS returns a new proc FS mounted under the given proc mountPoint. It will error -// if the mount point directory can't be read or is a file. -func NewFS(mountPoint string) (FS, error) { - fs, err := fs.NewFS(mountPoint) - if err != nil { - return FS{}, err - } - return FS{fs}, nil -} diff --git a/vendor/github.com/prometheus/procfs/fscache.go b/vendor/github.com/prometheus/procfs/fscache.go deleted file mode 100644 index f8070e6e2..000000000 --- a/vendor/github.com/prometheus/procfs/fscache.go +++ /dev/null @@ -1,422 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// Fscacheinfo represents fscache statistics. -type Fscacheinfo struct { - // Number of index cookies allocated - IndexCookiesAllocated uint64 - // data storage cookies allocated - DataStorageCookiesAllocated uint64 - // Number of special cookies allocated - SpecialCookiesAllocated uint64 - // Number of objects allocated - ObjectsAllocated uint64 - // Number of object allocation failures - ObjectAllocationsFailure uint64 - // Number of objects that reached the available state - ObjectsAvailable uint64 - // Number of objects that reached the dead state - ObjectsDead uint64 - // Number of objects that didn't have a coherency check - ObjectsWithoutCoherencyCheck uint64 - // Number of objects that passed a coherency check - ObjectsWithCoherencyCheck uint64 - // Number of objects that needed a coherency data update - ObjectsNeedCoherencyCheckUpdate uint64 - // Number of objects that were declared obsolete - ObjectsDeclaredObsolete uint64 - // Number of pages marked as being cached - PagesMarkedAsBeingCached uint64 - // Number of uncache page requests seen - UncachePagesRequestSeen uint64 - // Number of acquire cookie requests seen - AcquireCookiesRequestSeen uint64 - // Number of acq reqs given a NULL parent - AcquireRequestsWithNullParent uint64 - // Number of acq reqs rejected due to no cache available - AcquireRequestsRejectedNoCacheAvailable uint64 - // Number of acq reqs succeeded - AcquireRequestsSucceeded uint64 - // Number of acq reqs rejected due to error - AcquireRequestsRejectedDueToError uint64 - // Number of acq reqs failed on ENOMEM - AcquireRequestsFailedDueToEnomem uint64 - // Number of lookup calls made on cache backends - LookupsNumber uint64 - // Number of negative lookups made - LookupsNegative uint64 - // Number of positive lookups made - LookupsPositive uint64 - // Number of objects created by lookup - ObjectsCreatedByLookup uint64 - // Number of lookups timed out and requeued - LookupsTimedOutAndRequed uint64 - InvalidationsNumber 
uint64 - InvalidationsRunning uint64 - // Number of update cookie requests seen - UpdateCookieRequestSeen uint64 - // Number of upd reqs given a NULL parent - UpdateRequestsWithNullParent uint64 - // Number of upd reqs granted CPU time - UpdateRequestsRunning uint64 - // Number of relinquish cookie requests seen - RelinquishCookiesRequestSeen uint64 - // Number of rlq reqs given a NULL parent - RelinquishCookiesWithNullParent uint64 - // Number of rlq reqs waited on completion of creation - RelinquishRequestsWaitingCompleteCreation uint64 - // Relinqs rtr - RelinquishRetries uint64 - // Number of attribute changed requests seen - AttributeChangedRequestsSeen uint64 - // Number of attr changed requests queued - AttributeChangedRequestsQueued uint64 - // Number of attr changed rejected -ENOBUFS - AttributeChangedRejectDueToEnobufs uint64 - // Number of attr changed failed -ENOMEM - AttributeChangedFailedDueToEnomem uint64 - // Number of attr changed ops given CPU time - AttributeChangedOps uint64 - // Number of allocation requests seen - AllocationRequestsSeen uint64 - // Number of successful alloc reqs - AllocationOkRequests uint64 - // Number of alloc reqs that waited on lookup completion - AllocationWaitingOnLookup uint64 - // Number of alloc reqs rejected -ENOBUFS - AllocationsRejectedDueToEnobufs uint64 - // Number of alloc reqs aborted -ERESTARTSYS - AllocationsAbortedDueToErestartsys uint64 - // Number of alloc reqs submitted - AllocationOperationsSubmitted uint64 - // Number of alloc reqs waited for CPU time - AllocationsWaitedForCPU uint64 - // Number of alloc reqs aborted due to object death - AllocationsAbortedDueToObjectDeath uint64 - // Number of retrieval (read) requests seen - RetrievalsReadRequests uint64 - // Number of successful retr reqs - RetrievalsOk uint64 - // Number of retr reqs that waited on lookup completion - RetrievalsWaitingLookupCompletion uint64 - // Number of retr reqs returned -ENODATA - RetrievalsReturnedEnodata uint64 - // Number of retr reqs rejected -ENOBUFS - RetrievalsRejectedDueToEnobufs uint64 - // Number of retr reqs aborted -ERESTARTSYS - RetrievalsAbortedDueToErestartsys uint64 - // Number of retr reqs failed -ENOMEM - RetrievalsFailedDueToEnomem uint64 - // Number of retr reqs submitted - RetrievalsRequests uint64 - // Number of retr reqs waited for CPU time - RetrievalsWaitingCPU uint64 - // Number of retr reqs aborted due to object death - RetrievalsAbortedDueToObjectDeath uint64 - // Number of storage (write) requests seen - StoreWriteRequests uint64 - // Number of successful store reqs - StoreSuccessfulRequests uint64 - // Number of store reqs on a page already pending storage - StoreRequestsOnPendingStorage uint64 - // Number of store reqs rejected -ENOBUFS - StoreRequestsRejectedDueToEnobufs uint64 - // Number of store reqs failed -ENOMEM - StoreRequestsFailedDueToEnomem uint64 - // Number of store reqs submitted - StoreRequestsSubmitted uint64 - // Number of store reqs granted CPU time - StoreRequestsRunning uint64 - // Number of pages given store req processing time - StorePagesWithRequestsProcessing uint64 - // Number of store reqs deleted from tracking tree - StoreRequestsDeleted uint64 - // Number of store reqs over store limit - StoreRequestsOverStoreLimit uint64 - // Number of release reqs against pages with no pending store - ReleaseRequestsAgainstPagesWithNoPendingStorage uint64 - // Number of release reqs against pages stored by time lock granted - ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64 - // Number of release 
reqs ignored due to in-progress store - ReleaseRequestsIgnoredDueToInProgressStore uint64 - // Number of page stores cancelled due to release req - PageStoresCancelledByReleaseRequests uint64 - VmscanWaiting uint64 - // Number of times async ops added to pending queues - OpsPending uint64 - // Number of times async ops given CPU time - OpsRunning uint64 - // Number of times async ops queued for processing - OpsEnqueued uint64 - // Number of async ops cancelled - OpsCancelled uint64 - // Number of async ops rejected due to object lookup/create failure - OpsRejected uint64 - // Number of async ops initialised - OpsInitialised uint64 - // Number of async ops queued for deferred release - OpsDeferred uint64 - // Number of async ops released (should equal ini=N when idle) - OpsReleased uint64 - // Number of deferred-release async ops garbage collected - OpsGarbageCollected uint64 - // Number of in-progress alloc_object() cache ops - CacheopAllocationsinProgress uint64 - // Number of in-progress lookup_object() cache ops - CacheopLookupObjectInProgress uint64 - // Number of in-progress lookup_complete() cache ops - CacheopLookupCompleteInPorgress uint64 - // Number of in-progress grab_object() cache ops - CacheopGrabObjectInProgress uint64 - CacheopInvalidations uint64 - // Number of in-progress update_object() cache ops - CacheopUpdateObjectInProgress uint64 - // Number of in-progress drop_object() cache ops - CacheopDropObjectInProgress uint64 - // Number of in-progress put_object() cache ops - CacheopPutObjectInProgress uint64 - // Number of in-progress attr_changed() cache ops - CacheopAttributeChangeInProgress uint64 - // Number of in-progress sync_cache() cache ops - CacheopSyncCacheInProgress uint64 - // Number of in-progress read_or_alloc_page() cache ops - CacheopReadOrAllocPageInProgress uint64 - // Number of in-progress read_or_alloc_pages() cache ops - CacheopReadOrAllocPagesInProgress uint64 - // Number of in-progress allocate_page() cache ops - CacheopAllocatePageInProgress uint64 - // Number of in-progress allocate_pages() cache ops - CacheopAllocatePagesInProgress uint64 - // Number of in-progress write_page() cache ops - CacheopWritePagesInProgress uint64 - // Number of in-progress uncache_page() cache ops - CacheopUncachePagesInProgress uint64 - // Number of in-progress dissociate_pages() cache ops - CacheopDissociatePagesInProgress uint64 - // Number of object lookups/creations rejected due to lack of space - CacheevLookupsAndCreationsRejectedLackSpace uint64 - // Number of stale objects deleted - CacheevStaleObjectsDeleted uint64 - // Number of objects retired when relinquished - CacheevRetiredWhenReliquished uint64 - // Number of objects culled - CacheevObjectsCulled uint64 -} - -// Fscacheinfo returns information about current fscache statistics. 
-// See https://www.kernel.org/doc/Documentation/filesystems/caching/fscache.txt -func (fs FS) Fscacheinfo() (Fscacheinfo, error) { - b, err := util.ReadFileNoStat(fs.proc.Path("fs/fscache/stats")) - if err != nil { - return Fscacheinfo{}, err - } - - m, err := parseFscacheinfo(bytes.NewReader(b)) - if err != nil { - return Fscacheinfo{}, fmt.Errorf("failed to parse Fscacheinfo: %w", err) - } - - return *m, nil -} - -func setFSCacheFields(fields []string, setFields ...*uint64) error { - var err error - if len(fields) < len(setFields) { - return fmt.Errorf("Insufficient number of fields, expected %v, got %v", len(setFields), len(fields)) - } - - for i := range setFields { - *setFields[i], err = strconv.ParseUint(strings.Split(fields[i], "=")[1], 0, 64) - if err != nil { - return err - } - } - return nil -} - -func parseFscacheinfo(r io.Reader) (*Fscacheinfo, error) { - var m Fscacheinfo - s := bufio.NewScanner(r) - for s.Scan() { - fields := strings.Fields(s.Text()) - if len(fields) < 2 { - return nil, fmt.Errorf("malformed Fscacheinfo line: %q", s.Text()) - } - - switch fields[0] { - case "Cookies:": - err := setFSCacheFields(fields[1:], &m.IndexCookiesAllocated, &m.DataStorageCookiesAllocated, - &m.SpecialCookiesAllocated) - if err != nil { - return &m, err - } - case "Objects:": - err := setFSCacheFields(fields[1:], &m.ObjectsAllocated, &m.ObjectAllocationsFailure, - &m.ObjectsAvailable, &m.ObjectsDead) - if err != nil { - return &m, err - } - case "ChkAux": - err := setFSCacheFields(fields[2:], &m.ObjectsWithoutCoherencyCheck, &m.ObjectsWithCoherencyCheck, - &m.ObjectsNeedCoherencyCheckUpdate, &m.ObjectsDeclaredObsolete) - if err != nil { - return &m, err - } - case "Pages": - err := setFSCacheFields(fields[2:], &m.PagesMarkedAsBeingCached, &m.UncachePagesRequestSeen) - if err != nil { - return &m, err - } - case "Acquire:": - err := setFSCacheFields(fields[1:], &m.AcquireCookiesRequestSeen, &m.AcquireRequestsWithNullParent, - &m.AcquireRequestsRejectedNoCacheAvailable, &m.AcquireRequestsSucceeded, &m.AcquireRequestsRejectedDueToError, - &m.AcquireRequestsFailedDueToEnomem) - if err != nil { - return &m, err - } - case "Lookups:": - err := setFSCacheFields(fields[1:], &m.LookupsNumber, &m.LookupsNegative, &m.LookupsPositive, - &m.ObjectsCreatedByLookup, &m.LookupsTimedOutAndRequed) - if err != nil { - return &m, err - } - case "Invals": - err := setFSCacheFields(fields[2:], &m.InvalidationsNumber, &m.InvalidationsRunning) - if err != nil { - return &m, err - } - case "Updates:": - err := setFSCacheFields(fields[1:], &m.UpdateCookieRequestSeen, &m.UpdateRequestsWithNullParent, - &m.UpdateRequestsRunning) - if err != nil { - return &m, err - } - case "Relinqs:": - err := setFSCacheFields(fields[1:], &m.RelinquishCookiesRequestSeen, &m.RelinquishCookiesWithNullParent, - &m.RelinquishRequestsWaitingCompleteCreation, &m.RelinquishRetries) - if err != nil { - return &m, err - } - case "AttrChg:": - err := setFSCacheFields(fields[1:], &m.AttributeChangedRequestsSeen, &m.AttributeChangedRequestsQueued, - &m.AttributeChangedRejectDueToEnobufs, &m.AttributeChangedFailedDueToEnomem, &m.AttributeChangedOps) - if err != nil { - return &m, err - } - case "Allocs": - if strings.Split(fields[2], "=")[0] == "n" { - err := setFSCacheFields(fields[2:], &m.AllocationRequestsSeen, &m.AllocationOkRequests, - &m.AllocationWaitingOnLookup, &m.AllocationsRejectedDueToEnobufs, &m.AllocationsAbortedDueToErestartsys) - if err != nil { - return &m, err - } - } else { - err := setFSCacheFields(fields[2:], 
&m.AllocationOperationsSubmitted, &m.AllocationsWaitedForCPU, - &m.AllocationsAbortedDueToObjectDeath) - if err != nil { - return &m, err - } - } - case "Retrvls:": - if strings.Split(fields[1], "=")[0] == "n" { - err := setFSCacheFields(fields[1:], &m.RetrievalsReadRequests, &m.RetrievalsOk, &m.RetrievalsWaitingLookupCompletion, - &m.RetrievalsReturnedEnodata, &m.RetrievalsRejectedDueToEnobufs, &m.RetrievalsAbortedDueToErestartsys, - &m.RetrievalsFailedDueToEnomem) - if err != nil { - return &m, err - } - } else { - err := setFSCacheFields(fields[1:], &m.RetrievalsRequests, &m.RetrievalsWaitingCPU, &m.RetrievalsAbortedDueToObjectDeath) - if err != nil { - return &m, err - } - } - case "Stores": - if strings.Split(fields[2], "=")[0] == "n" { - err := setFSCacheFields(fields[2:], &m.StoreWriteRequests, &m.StoreSuccessfulRequests, - &m.StoreRequestsOnPendingStorage, &m.StoreRequestsRejectedDueToEnobufs, &m.StoreRequestsFailedDueToEnomem) - if err != nil { - return &m, err - } - } else { - err := setFSCacheFields(fields[2:], &m.StoreRequestsSubmitted, &m.StoreRequestsRunning, - &m.StorePagesWithRequestsProcessing, &m.StoreRequestsDeleted, &m.StoreRequestsOverStoreLimit) - if err != nil { - return &m, err - } - } - case "VmScan": - err := setFSCacheFields(fields[2:], &m.ReleaseRequestsAgainstPagesWithNoPendingStorage, - &m.ReleaseRequestsAgainstPagesStoredByTimeLockGranted, &m.ReleaseRequestsIgnoredDueToInProgressStore, - &m.PageStoresCancelledByReleaseRequests, &m.VmscanWaiting) - if err != nil { - return &m, err - } - case "Ops": - if strings.Split(fields[2], "=")[0] == "pend" { - err := setFSCacheFields(fields[2:], &m.OpsPending, &m.OpsRunning, &m.OpsEnqueued, &m.OpsCancelled, &m.OpsRejected) - if err != nil { - return &m, err - } - } else { - err := setFSCacheFields(fields[2:], &m.OpsInitialised, &m.OpsDeferred, &m.OpsReleased, &m.OpsGarbageCollected) - if err != nil { - return &m, err - } - } - case "CacheOp:": - if strings.Split(fields[1], "=")[0] == "alo" { - err := setFSCacheFields(fields[1:], &m.CacheopAllocationsinProgress, &m.CacheopLookupObjectInProgress, - &m.CacheopLookupCompleteInPorgress, &m.CacheopGrabObjectInProgress) - if err != nil { - return &m, err - } - } else if strings.Split(fields[1], "=")[0] == "inv" { - err := setFSCacheFields(fields[1:], &m.CacheopInvalidations, &m.CacheopUpdateObjectInProgress, - &m.CacheopDropObjectInProgress, &m.CacheopPutObjectInProgress, &m.CacheopAttributeChangeInProgress, - &m.CacheopSyncCacheInProgress) - if err != nil { - return &m, err - } - } else { - err := setFSCacheFields(fields[1:], &m.CacheopReadOrAllocPageInProgress, &m.CacheopReadOrAllocPagesInProgress, - &m.CacheopAllocatePageInProgress, &m.CacheopAllocatePagesInProgress, &m.CacheopWritePagesInProgress, - &m.CacheopUncachePagesInProgress, &m.CacheopDissociatePagesInProgress) - if err != nil { - return &m, err - } - } - case "CacheEv:": - err := setFSCacheFields(fields[1:], &m.CacheevLookupsAndCreationsRejectedLackSpace, &m.CacheevStaleObjectsDeleted, - &m.CacheevRetiredWhenReliquished, &m.CacheevObjectsCulled) - if err != nil { - return &m, err - } - } - } - - return &m, nil -} diff --git a/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/vendor/github.com/prometheus/procfs/internal/fs/fs.go deleted file mode 100644 index 3c18c7610..000000000 --- a/vendor/github.com/prometheus/procfs/internal/fs/fs.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file 
except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package fs - -import ( - "fmt" - "os" - "path/filepath" -) - -const ( - // DefaultProcMountPoint is the common mount point of the proc filesystem. - DefaultProcMountPoint = "/proc" - - // DefaultSysMountPoint is the common mount point of the sys filesystem. - DefaultSysMountPoint = "/sys" - - // DefaultConfigfsMountPoint is the common mount point of the configfs. - DefaultConfigfsMountPoint = "/sys/kernel/config" -) - -// FS represents a pseudo-filesystem, normally /proc or /sys, which provides an -// interface to kernel data structures. -type FS string - -// NewFS returns a new FS mounted under the given mountPoint. It will error -// if the mount point can't be read. -func NewFS(mountPoint string) (FS, error) { - info, err := os.Stat(mountPoint) - if err != nil { - return "", fmt.Errorf("could not read %q: %w", mountPoint, err) - } - if !info.IsDir() { - return "", fmt.Errorf("mount point %q is not a directory", mountPoint) - } - - return FS(mountPoint), nil -} - -// Path appends the given path elements to the filesystem path, adding separators -// as necessary. -func (fs FS) Path(p ...string) string { - return filepath.Join(append([]string{string(fs)}, p...)...) -} diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go deleted file mode 100644 index b030951fa..000000000 --- a/vendor/github.com/prometheus/procfs/internal/util/parse.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package util - -import ( - "os" - "strconv" - "strings" -) - -// ParseUint32s parses a slice of strings into a slice of uint32s. -func ParseUint32s(ss []string) ([]uint32, error) { - us := make([]uint32, 0, len(ss)) - for _, s := range ss { - u, err := strconv.ParseUint(s, 10, 32) - if err != nil { - return nil, err - } - - us = append(us, uint32(u)) - } - - return us, nil -} - -// ParseUint64s parses a slice of strings into a slice of uint64s. -func ParseUint64s(ss []string) ([]uint64, error) { - us := make([]uint64, 0, len(ss)) - for _, s := range ss { - u, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return nil, err - } - - us = append(us, u) - } - - return us, nil -} - -// ParsePInt64s parses a slice of strings into a slice of int64 pointers. 
-func ParsePInt64s(ss []string) ([]*int64, error) { - us := make([]*int64, 0, len(ss)) - for _, s := range ss { - u, err := strconv.ParseInt(s, 10, 64) - if err != nil { - return nil, err - } - - us = append(us, &u) - } - - return us, nil -} - -// ReadUintFromFile reads a file and attempts to parse a uint64 from it. -func ReadUintFromFile(path string) (uint64, error) { - data, err := os.ReadFile(path) - if err != nil { - return 0, err - } - return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) -} - -// ReadIntFromFile reads a file and attempts to parse a int64 from it. -func ReadIntFromFile(path string) (int64, error) { - data, err := os.ReadFile(path) - if err != nil { - return 0, err - } - return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64) -} - -// ParseBool parses a string into a boolean pointer. -func ParseBool(b string) *bool { - var truth bool - switch b { - case "enabled": - truth = true - case "disabled": - truth = false - default: - return nil - } - return &truth -} diff --git a/vendor/github.com/prometheus/procfs/internal/util/readfile.go b/vendor/github.com/prometheus/procfs/internal/util/readfile.go deleted file mode 100644 index 71b7a70eb..000000000 --- a/vendor/github.com/prometheus/procfs/internal/util/readfile.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package util - -import ( - "io" - "os" -) - -// ReadFileNoStat uses io.ReadAll to read contents of entire file. -// This is similar to os.ReadFile but without the call to os.Stat, because -// many files in /proc and /sys report incorrect file sizes (either 0 or 4096). -// Reads a max file size of 1024kB. For files larger than this, a scanner -// should be used. -func ReadFileNoStat(filename string) ([]byte, error) { - const maxBufferSize = 1024 * 1024 - - f, err := os.Open(filename) - if err != nil { - return nil, err - } - defer f.Close() - - reader := io.LimitReader(f, maxBufferSize) - return io.ReadAll(reader) -} diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go deleted file mode 100644 index 1ab875cee..000000000 --- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//go:build (linux || darwin) && !appengine -// +build linux darwin -// +build !appengine - -package util - -import ( - "bytes" - "os" - "syscall" -) - -// SysReadFile is a simplified os.ReadFile that invokes syscall.Read directly. -// https://github.com/prometheus/node_exporter/pull/728/files -// -// Note that this function will not read files larger than 128 bytes. -func SysReadFile(file string) (string, error) { - f, err := os.Open(file) - if err != nil { - return "", err - } - defer f.Close() - - // On some machines, hwmon drivers are broken and return EAGAIN. This causes - // Go's os.ReadFile implementation to poll forever. - // - // Since we either want to read data or bail immediately, do the simplest - // possible read using syscall directly. - const sysFileBufferSize = 128 - b := make([]byte, sysFileBufferSize) - n, err := syscall.Read(int(f.Fd()), b) - if err != nil { - return "", err - } - - return string(bytes.TrimSpace(b[:n])), nil -} diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go deleted file mode 100644 index 1d86f5e63..000000000 --- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build (linux && appengine) || (!linux && !darwin) -// +build linux,appengine !linux,!darwin - -package util - -import ( - "fmt" -) - -// SysReadFile is here implemented as a noop for builds that do not support -// the read syscall. For example Windows, or Linux on Google App Engine. -func SysReadFile(file string) (string, error) { - return "", fmt.Errorf("not supported on this platform") -} diff --git a/vendor/github.com/prometheus/procfs/internal/util/valueparser.go b/vendor/github.com/prometheus/procfs/internal/util/valueparser.go deleted file mode 100644 index fe2355d3c..000000000 --- a/vendor/github.com/prometheus/procfs/internal/util/valueparser.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package util - -import ( - "strconv" -) - -// TODO(mdlayher): util packages are an anti-pattern and this should be moved -// somewhere else that is more focused in the future. - -// A ValueParser enables parsing a single string into a variety of data types -// in a concise and safe way. 
The Err method must be invoked after invoking -// any other methods to ensure a value was successfully parsed. -type ValueParser struct { - v string - err error -} - -// NewValueParser creates a ValueParser using the input string. -func NewValueParser(v string) *ValueParser { - return &ValueParser{v: v} -} - -// Int interprets the underlying value as an int and returns that value. -func (vp *ValueParser) Int() int { return int(vp.int64()) } - -// PInt64 interprets the underlying value as an int64 and returns a pointer to -// that value. -func (vp *ValueParser) PInt64() *int64 { - if vp.err != nil { - return nil - } - - v := vp.int64() - return &v -} - -// int64 interprets the underlying value as an int64 and returns that value. -// TODO: export if/when necessary. -func (vp *ValueParser) int64() int64 { - if vp.err != nil { - return 0 - } - - // A base value of zero makes ParseInt infer the correct base using the - // string's prefix, if any. - const base = 0 - v, err := strconv.ParseInt(vp.v, base, 64) - if err != nil { - vp.err = err - return 0 - } - - return v -} - -// PUInt64 interprets the underlying value as an uint64 and returns a pointer to -// that value. -func (vp *ValueParser) PUInt64() *uint64 { - if vp.err != nil { - return nil - } - - // A base value of zero makes ParseInt infer the correct base using the - // string's prefix, if any. - const base = 0 - v, err := strconv.ParseUint(vp.v, base, 64) - if err != nil { - vp.err = err - return nil - } - - return &v -} - -// Err returns the last error, if any, encountered by the ValueParser. -func (vp *ValueParser) Err() error { - return vp.err -} diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go deleted file mode 100644 index 391c07957..000000000 --- a/vendor/github.com/prometheus/procfs/ipvs.go +++ /dev/null @@ -1,240 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "encoding/hex" - "errors" - "fmt" - "io" - "net" - "os" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`. -type IPVSStats struct { - // Total count of connections. - Connections uint64 - // Total incoming packages processed. - IncomingPackets uint64 - // Total outgoing packages processed. - OutgoingPackets uint64 - // Total incoming traffic. - IncomingBytes uint64 - // Total outgoing traffic. - OutgoingBytes uint64 -} - -// IPVSBackendStatus holds current metrics of one virtual / real address pair. -type IPVSBackendStatus struct { - // The local (virtual) IP address. - LocalAddress net.IP - // The remote (real) IP address. - RemoteAddress net.IP - // The local (virtual) port. - LocalPort uint16 - // The remote (real) port. - RemotePort uint16 - // The local firewall mark - LocalMark string - // The transport protocol (TCP, UDP). 
- Proto string - // The current number of active connections for this virtual/real address pair. - ActiveConn uint64 - // The current number of inactive connections for this virtual/real address pair. - InactConn uint64 - // The current weight of this virtual/real address pair. - Weight uint64 -} - -// IPVSStats reads the IPVS statistics from the specified `proc` filesystem. -func (fs FS) IPVSStats() (IPVSStats, error) { - data, err := util.ReadFileNoStat(fs.proc.Path("net/ip_vs_stats")) - if err != nil { - return IPVSStats{}, err - } - - return parseIPVSStats(bytes.NewReader(data)) -} - -// parseIPVSStats performs the actual parsing of `ip_vs_stats`. -func parseIPVSStats(r io.Reader) (IPVSStats, error) { - var ( - statContent []byte - statLines []string - statFields []string - stats IPVSStats - ) - - statContent, err := io.ReadAll(r) - if err != nil { - return IPVSStats{}, err - } - - statLines = strings.SplitN(string(statContent), "\n", 4) - if len(statLines) != 4 { - return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short") - } - - statFields = strings.Fields(statLines[2]) - if len(statFields) != 5 { - return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields") - } - - stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64) - if err != nil { - return IPVSStats{}, err - } - stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64) - if err != nil { - return IPVSStats{}, err - } - stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64) - if err != nil { - return IPVSStats{}, err - } - stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64) - if err != nil { - return IPVSStats{}, err - } - stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64) - if err != nil { - return IPVSStats{}, err - } - - return stats, nil -} - -// IPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem. 
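// Illustrative usage sketch (helper name is hypothetical): how the IPVS accessors in this file
// are typically consumed, assuming imports of "fmt" and "github.com/prometheus/procfs", and
// assuming procfs.NewFS as the FS constructor (the constructor is defined outside this diff).
func printIPVS() error {
	fs, err := procfs.NewFS("/proc") // assumed constructor, not part of this file
	if err != nil {
		return err
	}
	// Global counters from /proc/net/ip_vs_stats.
	stats, err := fs.IPVSStats()
	if err != nil {
		return err
	}
	fmt.Printf("connections=%d in_pkts=%d out_pkts=%d\n",
		stats.Connections, stats.IncomingPackets, stats.OutgoingPackets)
	// Per virtual/real address pair status from /proc/net/ip_vs.
	backends, err := fs.IPVSBackendStatus()
	if err != nil {
		return err
	}
	for _, b := range backends {
		fmt.Printf("%s %s:%d -> %s:%d weight=%d active=%d\n",
			b.Proto, b.LocalAddress, b.LocalPort, b.RemoteAddress, b.RemotePort, b.Weight, b.ActiveConn)
	}
	return nil
}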
-func (fs FS) IPVSBackendStatus() ([]IPVSBackendStatus, error) { - file, err := os.Open(fs.proc.Path("net/ip_vs")) - if err != nil { - return nil, err - } - defer file.Close() - - return parseIPVSBackendStatus(file) -} - -func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) { - var ( - status []IPVSBackendStatus - scanner = bufio.NewScanner(file) - proto string - localMark string - localAddress net.IP - localPort uint16 - err error - ) - - for scanner.Scan() { - fields := strings.Fields(scanner.Text()) - if len(fields) == 0 { - continue - } - switch { - case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port": - continue - case fields[0] == "TCP" || fields[0] == "UDP": - if len(fields) < 2 { - continue - } - proto = fields[0] - localMark = "" - localAddress, localPort, err = parseIPPort(fields[1]) - if err != nil { - return nil, err - } - case fields[0] == "FWM": - if len(fields) < 2 { - continue - } - proto = fields[0] - localMark = fields[1] - localAddress = nil - localPort = 0 - case fields[0] == "->": - if len(fields) < 6 { - continue - } - remoteAddress, remotePort, err := parseIPPort(fields[1]) - if err != nil { - return nil, err - } - weight, err := strconv.ParseUint(fields[3], 10, 64) - if err != nil { - return nil, err - } - activeConn, err := strconv.ParseUint(fields[4], 10, 64) - if err != nil { - return nil, err - } - inactConn, err := strconv.ParseUint(fields[5], 10, 64) - if err != nil { - return nil, err - } - status = append(status, IPVSBackendStatus{ - LocalAddress: localAddress, - LocalPort: localPort, - LocalMark: localMark, - RemoteAddress: remoteAddress, - RemotePort: remotePort, - Proto: proto, - Weight: weight, - ActiveConn: activeConn, - InactConn: inactConn, - }) - } - } - return status, nil -} - -func parseIPPort(s string) (net.IP, uint16, error) { - var ( - ip net.IP - err error - ) - - switch len(s) { - case 13: - ip, err = hex.DecodeString(s[0:8]) - if err != nil { - return nil, 0, err - } - case 46: - ip = net.ParseIP(s[1:40]) - if ip == nil { - return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40]) - } - default: - return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s) - } - - portString := s[len(s)-4:] - if len(portString) != 4 { - return nil, 0, fmt.Errorf("unexpected port string format: %s", portString) - } - port, err := strconv.ParseUint(portString, 16, 16) - if err != nil { - return nil, 0, err - } - - return ip, uint16(port), nil -} diff --git a/vendor/github.com/prometheus/procfs/kernel_random.go b/vendor/github.com/prometheus/procfs/kernel_random.go deleted file mode 100644 index db88566bd..000000000 --- a/vendor/github.com/prometheus/procfs/kernel_random.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !windows -// +build !windows - -package procfs - -import ( - "os" - - "github.com/prometheus/procfs/internal/util" -) - -// KernelRandom contains information about to the kernel's random number generator. 
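// Illustrative usage sketch (helper name is hypothetical) for FS.KernelRandom below, assuming
// imports of "fmt" and "github.com/prometheus/procfs" and the procfs.NewFS constructor (not
// shown in this diff). Every field is a *uint64 and stays nil when the corresponding file under
// /proc/sys/kernel/random does not exist, so callers must nil-check before dereferencing.
func printEntropy() error {
	fs, err := procfs.NewFS("/proc") // assumed constructor, not part of this file
	if err != nil {
		return err
	}
	random, err := fs.KernelRandom()
	if err != nil {
		return err
	}
	if random.EntropyAvaliable != nil { // field name as spelled in the struct below
		fmt.Printf("entropy available: %d bits\n", *random.EntropyAvaliable)
	}
	if random.PoolSize != nil {
		fmt.Printf("entropy pool size: %d bits\n", *random.PoolSize)
	}
	return nil
}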
-type KernelRandom struct { - // EntropyAvaliable gives the available entropy, in bits. - EntropyAvaliable *uint64 - // PoolSize gives the size of the entropy pool, in bits. - PoolSize *uint64 - // URandomMinReseedSeconds is the number of seconds after which the DRNG will be reseeded. - URandomMinReseedSeconds *uint64 - // WriteWakeupThreshold the number of bits of entropy below which we wake up processes - // that do a select(2) or poll(2) for write access to /dev/random. - WriteWakeupThreshold *uint64 - // ReadWakeupThreshold is the number of bits of entropy required for waking up processes that sleep - // waiting for entropy from /dev/random. - ReadWakeupThreshold *uint64 -} - -// KernelRandom returns values from /proc/sys/kernel/random. -func (fs FS) KernelRandom() (KernelRandom, error) { - random := KernelRandom{} - - for file, p := range map[string]**uint64{ - "entropy_avail": &random.EntropyAvaliable, - "poolsize": &random.PoolSize, - "urandom_min_reseed_secs": &random.URandomMinReseedSeconds, - "write_wakeup_threshold": &random.WriteWakeupThreshold, - "read_wakeup_threshold": &random.ReadWakeupThreshold, - } { - val, err := util.ReadUintFromFile(fs.proc.Path("sys", "kernel", "random", file)) - if os.IsNotExist(err) { - continue - } - if err != nil { - return random, err - } - *p = &val - } - - return random, nil -} diff --git a/vendor/github.com/prometheus/procfs/loadavg.go b/vendor/github.com/prometheus/procfs/loadavg.go deleted file mode 100644 index 0096cafbd..000000000 --- a/vendor/github.com/prometheus/procfs/loadavg.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "fmt" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// LoadAvg represents an entry in /proc/loadavg. -type LoadAvg struct { - Load1 float64 - Load5 float64 - Load15 float64 -} - -// LoadAvg returns loadavg from /proc. -func (fs FS) LoadAvg() (*LoadAvg, error) { - path := fs.proc.Path("loadavg") - - data, err := util.ReadFileNoStat(path) - if err != nil { - return nil, err - } - return parseLoad(data) -} - -// Parse /proc loadavg and return 1m, 5m and 15m. 
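// Illustrative usage sketch (helper name is hypothetical) for FS.LoadAvg / parseLoad below,
// assuming imports of "fmt" and "github.com/prometheus/procfs" and the procfs.NewFS
// constructor (not shown in this diff).
func printLoad() error {
	fs, err := procfs.NewFS("/proc") // assumed constructor, not part of this file
	if err != nil {
		return err
	}
	avg, err := fs.LoadAvg()
	if err != nil {
		return err
	}
	fmt.Printf("load averages: %.2f %.2f %.2f\n", avg.Load1, avg.Load5, avg.Load15)
	return nil
}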
-func parseLoad(loadavgBytes []byte) (*LoadAvg, error) { - loads := make([]float64, 3) - parts := strings.Fields(string(loadavgBytes)) - if len(parts) < 3 { - return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %q", string(loadavgBytes)) - } - - var err error - for i, load := range parts[0:3] { - loads[i], err = strconv.ParseFloat(load, 64) - if err != nil { - return nil, fmt.Errorf("could not parse load %q: %w", load, err) - } - } - return &LoadAvg{ - Load1: loads[0], - Load5: loads[1], - Load15: loads[2], - }, nil -} diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go deleted file mode 100644 index a95c889cb..000000000 --- a/vendor/github.com/prometheus/procfs/mdstat.go +++ /dev/null @@ -1,266 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "fmt" - "os" - "regexp" - "strconv" - "strings" -) - -var ( - statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[([U_]+)\]`) - recoveryLineBlocksRE = regexp.MustCompile(`\((\d+)/\d+\)`) - recoveryLinePctRE = regexp.MustCompile(`= (.+)%`) - recoveryLineFinishRE = regexp.MustCompile(`finish=(.+)min`) - recoveryLineSpeedRE = regexp.MustCompile(`speed=(.+)[A-Z]`) - componentDeviceRE = regexp.MustCompile(`(.*)\[\d+\]`) -) - -// MDStat holds info parsed from /proc/mdstat. -type MDStat struct { - // Name of the device. - Name string - // activity-state of the device. - ActivityState string - // Number of active disks. - DisksActive int64 - // Total number of disks the device requires. - DisksTotal int64 - // Number of failed disks. - DisksFailed int64 - // Number of "down" disks. (the _ indicator in the status line) - DisksDown int64 - // Spare disks in the device. - DisksSpare int64 - // Number of blocks the device holds. - BlocksTotal int64 - // Number of blocks on the device that are in sync. - BlocksSynced int64 - // progress percentage of current sync - BlocksSyncedPct float64 - // estimated finishing time for current sync (in minutes) - BlocksSyncedFinishTime float64 - // current sync speed (in Kilobytes/sec) - BlocksSyncedSpeed float64 - // Name of md component devices - Devices []string -} - -// MDStat parses an mdstat-file (/proc/mdstat) and returns a slice of -// structs containing the relevant info. More information available here: -// https://raid.wiki.kernel.org/index.php/Mdstat -func (fs FS) MDStat() ([]MDStat, error) { - data, err := os.ReadFile(fs.proc.Path("mdstat")) - if err != nil { - return nil, err - } - mdstat, err := parseMDStat(data) - if err != nil { - return nil, fmt.Errorf("error parsing mdstat %q: %w", fs.proc.Path("mdstat"), err) - } - return mdstat, nil -} - -// parseMDStat parses data from mdstat file (/proc/mdstat) and returns a slice of -// structs containing the relevant info. 
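// Illustrative usage sketch (helper name is hypothetical) for FS.MDStat / parseMDStat below,
// assuming imports of "fmt" and "github.com/prometheus/procfs" and the procfs.NewFS
// constructor (not shown in this diff).
func printMDStat() error {
	fs, err := procfs.NewFS("/proc") // assumed constructor, not part of this file
	if err != nil {
		return err
	}
	devices, err := fs.MDStat()
	if err != nil {
		return err
	}
	for _, md := range devices {
		fmt.Printf("%s: %s, %d/%d disks, %d/%d blocks synced, members=%v\n",
			md.Name, md.ActivityState, md.DisksActive, md.DisksTotal,
			md.BlocksSynced, md.BlocksTotal, md.Devices)
	}
	return nil
}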
-func parseMDStat(mdStatData []byte) ([]MDStat, error) { - mdStats := []MDStat{} - lines := strings.Split(string(mdStatData), "\n") - - for i, line := range lines { - if strings.TrimSpace(line) == "" || line[0] == ' ' || - strings.HasPrefix(line, "Personalities") || - strings.HasPrefix(line, "unused") { - continue - } - - deviceFields := strings.Fields(line) - if len(deviceFields) < 3 { - return nil, fmt.Errorf("not enough fields in mdline (expected at least 3): %s", line) - } - mdName := deviceFields[0] // mdx - state := deviceFields[2] // active or inactive - - if len(lines) <= i+3 { - return nil, fmt.Errorf("error parsing %q: too few lines for md device", mdName) - } - - // Failed disks have the suffix (F) & Spare disks have the suffix (S). - fail := int64(strings.Count(line, "(F)")) - spare := int64(strings.Count(line, "(S)")) - active, total, down, size, err := evalStatusLine(lines[i], lines[i+1]) - - if err != nil { - return nil, fmt.Errorf("error parsing md device lines: %w", err) - } - - syncLineIdx := i + 2 - if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line - syncLineIdx++ - } - - // If device is syncing at the moment, get the number of currently - // synced bytes, otherwise that number equals the size of the device. - syncedBlocks := size - speed := float64(0) - finish := float64(0) - pct := float64(0) - recovering := strings.Contains(lines[syncLineIdx], "recovery") - resyncing := strings.Contains(lines[syncLineIdx], "resync") - checking := strings.Contains(lines[syncLineIdx], "check") - - // Append recovery and resyncing state info. - if recovering || resyncing || checking { - if recovering { - state = "recovering" - } else if checking { - state = "checking" - } else { - state = "resyncing" - } - - // Handle case when resync=PENDING or resync=DELAYED. - if strings.Contains(lines[syncLineIdx], "PENDING") || - strings.Contains(lines[syncLineIdx], "DELAYED") { - syncedBlocks = 0 - } else { - syncedBlocks, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx]) - if err != nil { - return nil, fmt.Errorf("error parsing sync line in md device %q: %w", mdName, err) - } - } - } - - mdStats = append(mdStats, MDStat{ - Name: mdName, - ActivityState: state, - DisksActive: active, - DisksFailed: fail, - DisksDown: down, - DisksSpare: spare, - DisksTotal: total, - BlocksTotal: size, - BlocksSynced: syncedBlocks, - BlocksSyncedPct: pct, - BlocksSyncedFinishTime: finish, - BlocksSyncedSpeed: speed, - Devices: evalComponentDevices(deviceFields), - }) - } - - return mdStats, nil -} - -func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) { - statusFields := strings.Fields(statusLine) - if len(statusFields) < 1 { - return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q", statusLine) - } - - sizeStr := statusFields[0] - size, err = strconv.ParseInt(sizeStr, 10, 64) - if err != nil { - return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) - } - - if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") { - // In the device deviceLine, only disks have a number associated with them in []. 
- total = int64(strings.Count(deviceLine, "[")) - return total, total, 0, size, nil - } - - if strings.Contains(deviceLine, "inactive") { - return 0, 0, 0, size, nil - } - - matches := statusLineRE.FindStringSubmatch(statusLine) - if len(matches) != 5 { - return 0, 0, 0, 0, fmt.Errorf("couldn't find all the substring matches: %s", statusLine) - } - - total, err = strconv.ParseInt(matches[2], 10, 64) - if err != nil { - return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) - } - - active, err = strconv.ParseInt(matches[3], 10, 64) - if err != nil { - return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err) - } - down = int64(strings.Count(matches[4], "_")) - - return active, total, down, size, nil -} - -func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, pct float64, finish float64, speed float64, err error) { - matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine) - if len(matches) != 2 { - return 0, 0, 0, 0, fmt.Errorf("unexpected recoveryLine: %s", recoveryLine) - } - - syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) - if err != nil { - return 0, 0, 0, 0, fmt.Errorf("error parsing int from recoveryLine %q: %w", recoveryLine, err) - } - - // Get percentage complete - matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine) - if len(matches) != 2 { - return syncedBlocks, 0, 0, 0, fmt.Errorf("unexpected recoveryLine matching percentage: %s", recoveryLine) - } - pct, err = strconv.ParseFloat(strings.TrimSpace(matches[1]), 64) - if err != nil { - return syncedBlocks, 0, 0, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err) - } - - // Get time expected left to complete - matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine) - if len(matches) != 2 { - return syncedBlocks, pct, 0, 0, fmt.Errorf("unexpected recoveryLine matching est. finish time: %s", recoveryLine) - } - finish, err = strconv.ParseFloat(matches[1], 64) - if err != nil { - return syncedBlocks, pct, 0, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err) - } - - // Get recovery speed - matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine) - if len(matches) != 2 { - return syncedBlocks, pct, finish, 0, fmt.Errorf("unexpected recoveryLine matching speed: %s", recoveryLine) - } - speed, err = strconv.ParseFloat(matches[1], 64) - if err != nil { - return syncedBlocks, pct, finish, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err) - } - - return syncedBlocks, pct, finish, speed, nil -} - -func evalComponentDevices(deviceFields []string) []string { - mdComponentDevices := make([]string, 0) - if len(deviceFields) > 3 { - for _, field := range deviceFields[4:] { - match := componentDeviceRE.FindStringSubmatch(field) - if match == nil { - continue - } - mdComponentDevices = append(mdComponentDevices, match[1]) - } - } - - return mdComponentDevices -} diff --git a/vendor/github.com/prometheus/procfs/meminfo.go b/vendor/github.com/prometheus/procfs/meminfo.go deleted file mode 100644 index f65e174e5..000000000 --- a/vendor/github.com/prometheus/procfs/meminfo.go +++ /dev/null @@ -1,277 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// Meminfo represents memory statistics. -type Meminfo struct { - // Total usable ram (i.e. physical ram minus a few reserved - // bits and the kernel binary code) - MemTotal *uint64 - // The sum of LowFree+HighFree - MemFree *uint64 - // An estimate of how much memory is available for starting - // new applications, without swapping. Calculated from - // MemFree, SReclaimable, the size of the file LRU lists, and - // the low watermarks in each zone. The estimate takes into - // account that the system needs some page cache to function - // well, and that not all reclaimable slab will be - // reclaimable, due to items being in use. The impact of those - // factors will vary from system to system. - MemAvailable *uint64 - // Relatively temporary storage for raw disk blocks shouldn't - // get tremendously large (20MB or so) - Buffers *uint64 - Cached *uint64 - // Memory that once was swapped out, is swapped back in but - // still also is in the swapfile (if memory is needed it - // doesn't need to be swapped out AGAIN because it is already - // in the swapfile. This saves I/O) - SwapCached *uint64 - // Memory that has been used more recently and usually not - // reclaimed unless absolutely necessary. - Active *uint64 - // Memory which has been less recently used. It is more - // eligible to be reclaimed for other purposes - Inactive *uint64 - ActiveAnon *uint64 - InactiveAnon *uint64 - ActiveFile *uint64 - InactiveFile *uint64 - Unevictable *uint64 - Mlocked *uint64 - // total amount of swap space available - SwapTotal *uint64 - // Memory which has been evicted from RAM, and is temporarily - // on the disk - SwapFree *uint64 - // Memory which is waiting to get written back to the disk - Dirty *uint64 - // Memory which is actively being written back to the disk - Writeback *uint64 - // Non-file backed pages mapped into userspace page tables - AnonPages *uint64 - // files which have been mapped, such as libraries - Mapped *uint64 - Shmem *uint64 - // in-kernel data structures cache - Slab *uint64 - // Part of Slab, that might be reclaimed, such as caches - SReclaimable *uint64 - // Part of Slab, that cannot be reclaimed on memory pressure - SUnreclaim *uint64 - KernelStack *uint64 - // amount of memory dedicated to the lowest level of page - // tables. - PageTables *uint64 - // NFS pages sent to the server, but not yet committed to - // stable storage - NFSUnstable *uint64 - // Memory used for block device "bounce buffers" - Bounce *uint64 - // Memory used by FUSE for temporary writeback buffers - WritebackTmp *uint64 - // Based on the overcommit ratio ('vm.overcommit_ratio'), - // this is the total amount of memory currently available to - // be allocated on the system. This limit is only adhered to - // if strict overcommit accounting is enabled (mode 2 in - // 'vm.overcommit_memory'). 
- // The CommitLimit is calculated with the following formula: - // CommitLimit = ([total RAM pages] - [total huge TLB pages]) * - // overcommit_ratio / 100 + [total swap pages] - // For example, on a system with 1G of physical RAM and 7G - // of swap with a `vm.overcommit_ratio` of 30 it would - // yield a CommitLimit of 7.3G. - // For more details, see the memory overcommit documentation - // in vm/overcommit-accounting. - CommitLimit *uint64 - // The amount of memory presently allocated on the system. - // The committed memory is a sum of all of the memory which - // has been allocated by processes, even if it has not been - // "used" by them as of yet. A process which malloc()'s 1G - // of memory, but only touches 300M of it will show up as - // using 1G. This 1G is memory which has been "committed" to - // by the VM and can be used at any time by the allocating - // application. With strict overcommit enabled on the system - // (mode 2 in 'vm.overcommit_memory'),allocations which would - // exceed the CommitLimit (detailed above) will not be permitted. - // This is useful if one needs to guarantee that processes will - // not fail due to lack of memory once that memory has been - // successfully allocated. - CommittedAS *uint64 - // total size of vmalloc memory area - VmallocTotal *uint64 - // amount of vmalloc area which is used - VmallocUsed *uint64 - // largest contiguous block of vmalloc area which is free - VmallocChunk *uint64 - HardwareCorrupted *uint64 - AnonHugePages *uint64 - ShmemHugePages *uint64 - ShmemPmdMapped *uint64 - CmaTotal *uint64 - CmaFree *uint64 - HugePagesTotal *uint64 - HugePagesFree *uint64 - HugePagesRsvd *uint64 - HugePagesSurp *uint64 - Hugepagesize *uint64 - DirectMap4k *uint64 - DirectMap2M *uint64 - DirectMap1G *uint64 -} - -// Meminfo returns an information about current kernel/system memory statistics. -// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt -func (fs FS) Meminfo() (Meminfo, error) { - b, err := util.ReadFileNoStat(fs.proc.Path("meminfo")) - if err != nil { - return Meminfo{}, err - } - - m, err := parseMemInfo(bytes.NewReader(b)) - if err != nil { - return Meminfo{}, fmt.Errorf("failed to parse meminfo: %w", err) - } - - return *m, nil -} - -func parseMemInfo(r io.Reader) (*Meminfo, error) { - var m Meminfo - s := bufio.NewScanner(r) - for s.Scan() { - // Each line has at least a name and value; we ignore the unit. 
- fields := strings.Fields(s.Text()) - if len(fields) < 2 { - return nil, fmt.Errorf("malformed meminfo line: %q", s.Text()) - } - - v, err := strconv.ParseUint(fields[1], 0, 64) - if err != nil { - return nil, err - } - - switch fields[0] { - case "MemTotal:": - m.MemTotal = &v - case "MemFree:": - m.MemFree = &v - case "MemAvailable:": - m.MemAvailable = &v - case "Buffers:": - m.Buffers = &v - case "Cached:": - m.Cached = &v - case "SwapCached:": - m.SwapCached = &v - case "Active:": - m.Active = &v - case "Inactive:": - m.Inactive = &v - case "Active(anon):": - m.ActiveAnon = &v - case "Inactive(anon):": - m.InactiveAnon = &v - case "Active(file):": - m.ActiveFile = &v - case "Inactive(file):": - m.InactiveFile = &v - case "Unevictable:": - m.Unevictable = &v - case "Mlocked:": - m.Mlocked = &v - case "SwapTotal:": - m.SwapTotal = &v - case "SwapFree:": - m.SwapFree = &v - case "Dirty:": - m.Dirty = &v - case "Writeback:": - m.Writeback = &v - case "AnonPages:": - m.AnonPages = &v - case "Mapped:": - m.Mapped = &v - case "Shmem:": - m.Shmem = &v - case "Slab:": - m.Slab = &v - case "SReclaimable:": - m.SReclaimable = &v - case "SUnreclaim:": - m.SUnreclaim = &v - case "KernelStack:": - m.KernelStack = &v - case "PageTables:": - m.PageTables = &v - case "NFS_Unstable:": - m.NFSUnstable = &v - case "Bounce:": - m.Bounce = &v - case "WritebackTmp:": - m.WritebackTmp = &v - case "CommitLimit:": - m.CommitLimit = &v - case "Committed_AS:": - m.CommittedAS = &v - case "VmallocTotal:": - m.VmallocTotal = &v - case "VmallocUsed:": - m.VmallocUsed = &v - case "VmallocChunk:": - m.VmallocChunk = &v - case "HardwareCorrupted:": - m.HardwareCorrupted = &v - case "AnonHugePages:": - m.AnonHugePages = &v - case "ShmemHugePages:": - m.ShmemHugePages = &v - case "ShmemPmdMapped:": - m.ShmemPmdMapped = &v - case "CmaTotal:": - m.CmaTotal = &v - case "CmaFree:": - m.CmaFree = &v - case "HugePages_Total:": - m.HugePagesTotal = &v - case "HugePages_Free:": - m.HugePagesFree = &v - case "HugePages_Rsvd:": - m.HugePagesRsvd = &v - case "HugePages_Surp:": - m.HugePagesSurp = &v - case "Hugepagesize:": - m.Hugepagesize = &v - case "DirectMap4k:": - m.DirectMap4k = &v - case "DirectMap2M:": - m.DirectMap2M = &v - case "DirectMap1G:": - m.DirectMap1G = &v - } - } - - return &m, nil -} diff --git a/vendor/github.com/prometheus/procfs/mountinfo.go b/vendor/github.com/prometheus/procfs/mountinfo.go deleted file mode 100644 index 59f4d5055..000000000 --- a/vendor/github.com/prometheus/procfs/mountinfo.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// A MountInfo is a type that describes the details, options -// for each mount, parsed from /proc/self/mountinfo. -// The fields described in each entry of /proc/self/mountinfo -// is described in the following man page. 
-// http://man7.org/linux/man-pages/man5/proc.5.html -type MountInfo struct { - // Unique ID for the mount - MountID int - // The ID of the parent mount - ParentID int - // The value of `st_dev` for the files on this FS - MajorMinorVer string - // The pathname of the directory in the FS that forms - // the root for this mount - Root string - // The pathname of the mount point relative to the root - MountPoint string - // Mount options - Options map[string]string - // Zero or more optional fields - OptionalFields map[string]string - // The Filesystem type - FSType string - // FS specific information or "none" - Source string - // Superblock options - SuperOptions map[string]string -} - -// Reads each line of the mountinfo file, and returns a list of formatted MountInfo structs. -func parseMountInfo(info []byte) ([]*MountInfo, error) { - mounts := []*MountInfo{} - scanner := bufio.NewScanner(bytes.NewReader(info)) - for scanner.Scan() { - mountString := scanner.Text() - parsedMounts, err := parseMountInfoString(mountString) - if err != nil { - return nil, err - } - mounts = append(mounts, parsedMounts) - } - - err := scanner.Err() - return mounts, err -} - -// Parses a mountinfo file line, and converts it to a MountInfo struct. -// An important check here is to see if the hyphen separator, as if it does not exist, -// it means that the line is malformed. -func parseMountInfoString(mountString string) (*MountInfo, error) { - var err error - - mountInfo := strings.Split(mountString, " ") - mountInfoLength := len(mountInfo) - if mountInfoLength < 10 { - return nil, fmt.Errorf("couldn't find enough fields in mount string: %s", mountString) - } - - if mountInfo[mountInfoLength-4] != "-" { - return nil, fmt.Errorf("couldn't find separator in expected field: %s", mountInfo[mountInfoLength-4]) - } - - mount := &MountInfo{ - MajorMinorVer: mountInfo[2], - Root: mountInfo[3], - MountPoint: mountInfo[4], - Options: mountOptionsParser(mountInfo[5]), - OptionalFields: nil, - FSType: mountInfo[mountInfoLength-3], - Source: mountInfo[mountInfoLength-2], - SuperOptions: mountOptionsParser(mountInfo[mountInfoLength-1]), - } - - mount.MountID, err = strconv.Atoi(mountInfo[0]) - if err != nil { - return nil, fmt.Errorf("failed to parse mount ID") - } - mount.ParentID, err = strconv.Atoi(mountInfo[1]) - if err != nil { - return nil, fmt.Errorf("failed to parse parent ID") - } - // Has optional fields, which is a space separated list of values. - // Example: shared:2 master:7 - if mountInfo[6] != "" { - mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4]) - if err != nil { - return nil, err - } - } - return mount, nil -} - -// mountOptionsIsValidField checks a string against a valid list of optional fields keys. -func mountOptionsIsValidField(s string) bool { - switch s { - case - "shared", - "master", - "propagate_from", - "unbindable": - return true - } - return false -} - -// mountOptionsParseOptionalFields parses a list of optional fields strings into a double map of strings. -func mountOptionsParseOptionalFields(o []string) (map[string]string, error) { - optionalFields := make(map[string]string) - for _, field := range o { - optionSplit := strings.SplitN(field, ":", 2) - value := "" - if len(optionSplit) == 2 { - value = optionSplit[1] - } - if mountOptionsIsValidField(optionSplit[0]) { - optionalFields[optionSplit[0]] = value - } - } - return optionalFields, nil -} - -// mountOptionsParser parses the mount options, superblock options. 
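// Illustrative usage sketch (helper name is hypothetical) for the mountinfo helpers below,
// assuming imports of "fmt" and "github.com/prometheus/procfs". GetMounts reads
// /proc/self/mountinfo directly, so no FS value is needed here.
func printMounts() error {
	mounts, err := procfs.GetMounts()
	if err != nil {
		return err
	}
	for _, m := range mounts {
		fmt.Printf("%s on %s type %s\n", m.Source, m.MountPoint, m.FSType)
	}
	return nil
}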
-func mountOptionsParser(mountOptions string) map[string]string { - opts := make(map[string]string) - options := strings.Split(mountOptions, ",") - for _, opt := range options { - splitOption := strings.Split(opt, "=") - if len(splitOption) < 2 { - key := splitOption[0] - opts[key] = "" - } else { - key, value := splitOption[0], splitOption[1] - opts[key] = value - } - } - return opts -} - -// GetMounts retrieves mountinfo information from `/proc/self/mountinfo`. -func GetMounts() ([]*MountInfo, error) { - data, err := util.ReadFileNoStat("/proc/self/mountinfo") - if err != nil { - return nil, err - } - return parseMountInfo(data) -} - -// GetProcMounts retrieves mountinfo information from a processes' `/proc//mountinfo`. -func GetProcMounts(pid int) ([]*MountInfo, error) { - data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/mountinfo", pid)) - if err != nil { - return nil, err - } - return parseMountInfo(data) -} diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go deleted file mode 100644 index 0c482c18c..000000000 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ /dev/null @@ -1,639 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -// While implementing parsing of /proc/[pid]/mountstats, this blog was used -// heavily as a reference: -// https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex -// -// Special thanks to Chris Siebenmann for all of his posts explaining the -// various statistics available for NFS. - -import ( - "bufio" - "fmt" - "io" - "strconv" - "strings" - "time" -) - -// Constants shared between multiple functions. -const ( - deviceEntryLen = 8 - - fieldBytesLen = 8 - fieldEventsLen = 27 - - statVersion10 = "1.0" - statVersion11 = "1.1" - - fieldTransport10TCPLen = 10 - fieldTransport10UDPLen = 7 - - fieldTransport11TCPLen = 13 - fieldTransport11UDPLen = 10 -) - -// A Mount is a device mount parsed from /proc/[pid]/mountstats. -type Mount struct { - // Name of the device. - Device string - // The mount point of the device. - Mount string - // The filesystem type used by the device. - Type string - // If available additional statistics related to this Mount. - // Use a type assertion to determine if additional statistics are available. - Stats MountStats -} - -// A MountStats is a type which contains detailed statistics for a specific -// type of Mount. -type MountStats interface { - mountStats() -} - -// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts. -type MountStatsNFS struct { - // The version of statistics provided. - StatVersion string - // The mount options of the NFS mount. - Opts map[string]string - // The age of the NFS mount. - Age time.Duration - // Statistics related to byte counters for various operations. - Bytes NFSBytesStats - // Statistics related to various NFS event occurrences. - Events NFSEventsStats - // Statistics broken down by filesystem operation. 
- Operations []NFSOperationStats - // Statistics about the NFS RPC transport. - Transport NFSTransportStats -} - -// mountStats implements MountStats. -func (m MountStatsNFS) mountStats() {} - -// A NFSBytesStats contains statistics about the number of bytes read and written -// by an NFS client to and from an NFS server. -type NFSBytesStats struct { - // Number of bytes read using the read() syscall. - Read uint64 - // Number of bytes written using the write() syscall. - Write uint64 - // Number of bytes read using the read() syscall in O_DIRECT mode. - DirectRead uint64 - // Number of bytes written using the write() syscall in O_DIRECT mode. - DirectWrite uint64 - // Number of bytes read from the NFS server, in total. - ReadTotal uint64 - // Number of bytes written to the NFS server, in total. - WriteTotal uint64 - // Number of pages read directly via mmap()'d files. - ReadPages uint64 - // Number of pages written directly via mmap()'d files. - WritePages uint64 -} - -// A NFSEventsStats contains statistics about NFS event occurrences. -type NFSEventsStats struct { - // Number of times cached inode attributes are re-validated from the server. - InodeRevalidate uint64 - // Number of times cached dentry nodes are re-validated from the server. - DnodeRevalidate uint64 - // Number of times an inode cache is cleared. - DataInvalidate uint64 - // Number of times cached inode attributes are invalidated. - AttributeInvalidate uint64 - // Number of times files or directories have been open()'d. - VFSOpen uint64 - // Number of times a directory lookup has occurred. - VFSLookup uint64 - // Number of times permissions have been checked. - VFSAccess uint64 - // Number of updates (and potential writes) to pages. - VFSUpdatePage uint64 - // Number of pages read directly via mmap()'d files. - VFSReadPage uint64 - // Number of times a group of pages have been read. - VFSReadPages uint64 - // Number of pages written directly via mmap()'d files. - VFSWritePage uint64 - // Number of times a group of pages have been written. - VFSWritePages uint64 - // Number of times directory entries have been read with getdents(). - VFSGetdents uint64 - // Number of times attributes have been set on inodes. - VFSSetattr uint64 - // Number of pending writes that have been forcefully flushed to the server. - VFSFlush uint64 - // Number of times fsync() has been called on directories and files. - VFSFsync uint64 - // Number of times locking has been attempted on a file. - VFSLock uint64 - // Number of times files have been closed and released. - VFSFileRelease uint64 - // Unknown. Possibly unused. - CongestionWait uint64 - // Number of times files have been truncated. - Truncation uint64 - // Number of times a file has been grown due to writes beyond its existing end. - WriteExtension uint64 - // Number of times a file was removed while still open by another process. - SillyRename uint64 - // Number of times the NFS server gave less data than expected while reading. - ShortRead uint64 - // Number of times the NFS server wrote less data than expected while writing. - ShortWrite uint64 - // Number of times the NFS server indicated EJUKEBOX; retrieving data from - // offline storage. - JukeboxDelay uint64 - // Number of NFS v4.1+ pNFS reads. - PNFSRead uint64 - // Number of NFS v4.1+ pNFS writes. - PNFSWrite uint64 -} - -// A NFSOperationStats contains statistics for a single operation. -type NFSOperationStats struct { - // The name of the operation. - Operation string - // Number of requests performed for this operation. 
- Requests uint64 - // Number of times an actual RPC request has been transmitted for this operation. - Transmissions uint64 - // Number of times a request has had a major timeout. - MajorTimeouts uint64 - // Number of bytes sent for this operation, including RPC headers and payload. - BytesSent uint64 - // Number of bytes received for this operation, including RPC headers and payload. - BytesReceived uint64 - // Duration all requests spent queued for transmission before they were sent. - CumulativeQueueMilliseconds uint64 - // Duration it took to get a reply back after the request was transmitted. - CumulativeTotalResponseMilliseconds uint64 - // Duration from when a request was enqueued to when it was completely handled. - CumulativeTotalRequestMilliseconds uint64 - // The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions. - Errors uint64 -} - -// A NFSTransportStats contains statistics for the NFS mount RPC requests and -// responses. -type NFSTransportStats struct { - // The transport protocol used for the NFS mount. - Protocol string - // The local port used for the NFS mount. - Port uint64 - // Number of times the client has had to establish a connection from scratch - // to the NFS server. - Bind uint64 - // Number of times the client has made a TCP connection to the NFS server. - Connect uint64 - // Duration (in jiffies, a kernel internal unit of time) the NFS mount has - // spent waiting for connections to the server to be established. - ConnectIdleTime uint64 - // Duration since the NFS mount last saw any RPC traffic. - IdleTimeSeconds uint64 - // Number of RPC requests for this mount sent to the NFS server. - Sends uint64 - // Number of RPC responses for this mount received from the NFS server. - Receives uint64 - // Number of times the NFS server sent a response with a transaction ID - // unknown to this client. - BadTransactionIDs uint64 - // A running counter, incremented on each request as the current difference - // ebetween sends and receives. - CumulativeActiveRequests uint64 - // A running counter, incremented on each request by the current backlog - // queue size. - CumulativeBacklog uint64 - - // Stats below only available with stat version 1.1. - - // Maximum number of simultaneously active RPC requests ever used. - MaximumRPCSlotsUsed uint64 - // A running counter, incremented on each request as the current size of the - // sending queue. - CumulativeSendingQueue uint64 - // A running counter, incremented on each request as the current size of the - // pending queue. - CumulativePendingQueue uint64 -} - -// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice -// of Mount structures containing detailed information about each mount. -// If available, statistics for each mount are parsed as well. -func parseMountStats(r io.Reader) ([]*Mount, error) { - const ( - device = "device" - statVersionPrefix = "statvers=" - - nfs3Type = "nfs" - nfs4Type = "nfs4" - ) - - var mounts []*Mount - - s := bufio.NewScanner(r) - for s.Scan() { - // Only look for device entries in this function - ss := strings.Fields(string(s.Bytes())) - if len(ss) == 0 || ss[0] != device { - continue - } - - m, err := parseMount(ss) - if err != nil { - return nil, err - } - - // Does this mount also possess statistics information? 
- if len(ss) > deviceEntryLen { - // Only NFSv3 and v4 are supported for parsing statistics - if m.Type != nfs3Type && m.Type != nfs4Type { - return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type) - } - - statVersion := strings.TrimPrefix(ss[8], statVersionPrefix) - - stats, err := parseMountStatsNFS(s, statVersion) - if err != nil { - return nil, err - } - - m.Stats = stats - } - - mounts = append(mounts, m) - } - - return mounts, s.Err() -} - -// parseMount parses an entry in /proc/[pid]/mountstats in the format: -// -// device [device] mounted on [mount] with fstype [type] -func parseMount(ss []string) (*Mount, error) { - if len(ss) < deviceEntryLen { - return nil, fmt.Errorf("invalid device entry: %v", ss) - } - - // Check for specific words appearing at specific indices to ensure - // the format is consistent with what we expect - format := []struct { - i int - s string - }{ - {i: 0, s: "device"}, - {i: 2, s: "mounted"}, - {i: 3, s: "on"}, - {i: 5, s: "with"}, - {i: 6, s: "fstype"}, - } - - for _, f := range format { - if ss[f.i] != f.s { - return nil, fmt.Errorf("invalid device entry: %v", ss) - } - } - - return &Mount{ - Device: ss[1], - Mount: ss[4], - Type: ss[7], - }, nil -} - -// parseMountStatsNFS parses a MountStatsNFS by scanning additional information -// related to NFS statistics. -func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) { - // Field indicators for parsing specific types of data - const ( - fieldOpts = "opts:" - fieldAge = "age:" - fieldBytes = "bytes:" - fieldEvents = "events:" - fieldPerOpStats = "per-op" - fieldTransport = "xprt:" - ) - - stats := &MountStatsNFS{ - StatVersion: statVersion, - } - - for s.Scan() { - ss := strings.Fields(string(s.Bytes())) - if len(ss) == 0 { - break - } - - switch ss[0] { - case fieldOpts: - if len(ss) < 2 { - return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) - } - if stats.Opts == nil { - stats.Opts = map[string]string{} - } - for _, opt := range strings.Split(ss[1], ",") { - split := strings.Split(opt, "=") - if len(split) == 2 { - stats.Opts[split[0]] = split[1] - } else { - stats.Opts[opt] = "" - } - } - case fieldAge: - if len(ss) < 2 { - return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) - } - // Age integer is in seconds - d, err := time.ParseDuration(ss[1] + "s") - if err != nil { - return nil, err - } - - stats.Age = d - case fieldBytes: - if len(ss) < 2 { - return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) - } - bstats, err := parseNFSBytesStats(ss[1:]) - if err != nil { - return nil, err - } - - stats.Bytes = *bstats - case fieldEvents: - if len(ss) < 2 { - return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) - } - estats, err := parseNFSEventsStats(ss[1:]) - if err != nil { - return nil, err - } - - stats.Events = *estats - case fieldTransport: - if len(ss) < 3 { - return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss) - } - - tstats, err := parseNFSTransportStats(ss[1:], statVersion) - if err != nil { - return nil, err - } - - stats.Transport = *tstats - } - - // When encountering "per-operation statistics", we must break this - // loop and parse them separately to ensure we can terminate parsing - // before reaching another device entry; hence why this 'if' statement - // is not just another switch case - if ss[0] == fieldPerOpStats { - break - } - } - - if err := s.Err(); err != nil { - return nil, err - } - - // NFS per-operation stats appear last before 
the next device entry - perOpStats, err := parseNFSOperationStats(s) - if err != nil { - return nil, err - } - - stats.Operations = perOpStats - - return stats, nil -} - -// parseNFSBytesStats parses a NFSBytesStats line using an input set of -// integer fields. -func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) { - if len(ss) != fieldBytesLen { - return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss) - } - - ns := make([]uint64, 0, fieldBytesLen) - for _, s := range ss { - n, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return nil, err - } - - ns = append(ns, n) - } - - return &NFSBytesStats{ - Read: ns[0], - Write: ns[1], - DirectRead: ns[2], - DirectWrite: ns[3], - ReadTotal: ns[4], - WriteTotal: ns[5], - ReadPages: ns[6], - WritePages: ns[7], - }, nil -} - -// parseNFSEventsStats parses a NFSEventsStats line using an input set of -// integer fields. -func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) { - if len(ss) != fieldEventsLen { - return nil, fmt.Errorf("invalid NFS events stats: %v", ss) - } - - ns := make([]uint64, 0, fieldEventsLen) - for _, s := range ss { - n, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return nil, err - } - - ns = append(ns, n) - } - - return &NFSEventsStats{ - InodeRevalidate: ns[0], - DnodeRevalidate: ns[1], - DataInvalidate: ns[2], - AttributeInvalidate: ns[3], - VFSOpen: ns[4], - VFSLookup: ns[5], - VFSAccess: ns[6], - VFSUpdatePage: ns[7], - VFSReadPage: ns[8], - VFSReadPages: ns[9], - VFSWritePage: ns[10], - VFSWritePages: ns[11], - VFSGetdents: ns[12], - VFSSetattr: ns[13], - VFSFlush: ns[14], - VFSFsync: ns[15], - VFSLock: ns[16], - VFSFileRelease: ns[17], - CongestionWait: ns[18], - Truncation: ns[19], - WriteExtension: ns[20], - SillyRename: ns[21], - ShortRead: ns[22], - ShortWrite: ns[23], - JukeboxDelay: ns[24], - PNFSRead: ns[25], - PNFSWrite: ns[26], - }, nil -} - -// parseNFSOperationStats parses a slice of NFSOperationStats by scanning -// additional information about per-operation statistics until an empty -// line is reached. -func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { - const ( - // Minimum number of expected fields in each per-operation statistics set - minFields = 9 - ) - - var ops []NFSOperationStats - - for s.Scan() { - ss := strings.Fields(string(s.Bytes())) - if len(ss) == 0 { - // Must break when reading a blank line after per-operation stats to - // enable top-level function to parse the next device entry - break - } - - if len(ss) < minFields { - return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss) - } - - // Skip string operation name for integers - ns := make([]uint64, 0, minFields-1) - for _, st := range ss[1:] { - n, err := strconv.ParseUint(st, 10, 64) - if err != nil { - return nil, err - } - - ns = append(ns, n) - } - - opStats := NFSOperationStats{ - Operation: strings.TrimSuffix(ss[0], ":"), - Requests: ns[0], - Transmissions: ns[1], - MajorTimeouts: ns[2], - BytesSent: ns[3], - BytesReceived: ns[4], - CumulativeQueueMilliseconds: ns[5], - CumulativeTotalResponseMilliseconds: ns[6], - CumulativeTotalRequestMilliseconds: ns[7], - } - - if len(ns) > 8 { - opStats.Errors = ns[8] - } - - ops = append(ops, opStats) - } - - return ops, s.Err() -} - -// parseNFSTransportStats parses a NFSTransportStats line using an input set of -// integer fields matched to a specific stats version. -func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) { - // Extract the protocol field. 
It is the only string value in the line - protocol := ss[0] - ss = ss[1:] - - switch statVersion { - case statVersion10: - var expectedLength int - if protocol == "tcp" { - expectedLength = fieldTransport10TCPLen - } else if protocol == "udp" { - expectedLength = fieldTransport10UDPLen - } else { - return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.0 statement: %v", protocol, ss) - } - if len(ss) != expectedLength { - return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss) - } - case statVersion11: - var expectedLength int - if protocol == "tcp" { - expectedLength = fieldTransport11TCPLen - } else if protocol == "udp" { - expectedLength = fieldTransport11UDPLen - } else { - return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.1 statement: %v", protocol, ss) - } - if len(ss) != expectedLength { - return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss) - } - default: - return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion) - } - - // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay - // in a v1.0 response. Since the stat length is bigger for TCP stats, we use - // the TCP length here. - // - // Note: slice length must be set to length of v1.1 stats to avoid a panic when - // only v1.0 stats are present. - // See: https://github.com/prometheus/node_exporter/issues/571. - ns := make([]uint64, fieldTransport11TCPLen) - for i, s := range ss { - n, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return nil, err - } - - ns[i] = n - } - - // The fields differ depending on the transport protocol (TCP or UDP) - // From https://utcc.utoronto.ca/%7Ecks/space/blog/linux/NFSMountstatsXprt - // - // For the udp RPC transport there is no connection count, connect idle time, - // or idle time (fields #3, #4, and #5); all other fields are the same. So - // we set them to 0 here. - if protocol == "udp" { - ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...) - } - - return &NFSTransportStats{ - Protocol: protocol, - Port: ns[0], - Bind: ns[1], - Connect: ns[2], - ConnectIdleTime: ns[3], - IdleTimeSeconds: ns[4], - Sends: ns[5], - Receives: ns[6], - BadTransactionIDs: ns[7], - CumulativeActiveRequests: ns[8], - CumulativeBacklog: ns[9], - MaximumRPCSlotsUsed: ns[10], - CumulativeSendingQueue: ns[11], - CumulativePendingQueue: ns[12], - }, nil -} diff --git a/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/vendor/github.com/prometheus/procfs/net_conntrackstat.go deleted file mode 100644 index 8300daca0..000000000 --- a/vendor/github.com/prometheus/procfs/net_conntrackstat.go +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// A ConntrackStatEntry represents one line from net/stat/nf_conntrack -// and contains netfilter conntrack statistics at one CPU core. -type ConntrackStatEntry struct { - Entries uint64 - Found uint64 - Invalid uint64 - Ignore uint64 - Insert uint64 - InsertFailed uint64 - Drop uint64 - EarlyDrop uint64 - SearchRestart uint64 -} - -// ConntrackStat retrieves netfilter's conntrack statistics, split by CPU cores. -func (fs FS) ConntrackStat() ([]ConntrackStatEntry, error) { - return readConntrackStat(fs.proc.Path("net", "stat", "nf_conntrack")) -} - -// Parses a slice of ConntrackStatEntries from the given filepath. -func readConntrackStat(path string) ([]ConntrackStatEntry, error) { - // This file is small and can be read with one syscall. - b, err := util.ReadFileNoStat(path) - if err != nil { - // Do not wrap this error so the caller can detect os.IsNotExist and - // similar conditions. - return nil, err - } - - stat, err := parseConntrackStat(bytes.NewReader(b)) - if err != nil { - return nil, fmt.Errorf("failed to read conntrack stats from %q: %w", path, err) - } - - return stat, nil -} - -// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries. -func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) { - var entries []ConntrackStatEntry - - scanner := bufio.NewScanner(r) - scanner.Scan() - for scanner.Scan() { - fields := strings.Fields(scanner.Text()) - conntrackEntry, err := parseConntrackStatEntry(fields) - if err != nil { - return nil, err - } - entries = append(entries, *conntrackEntry) - } - - return entries, nil -} - -// Parses a ConntrackStatEntry from given array of fields. -func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) { - if len(fields) != 17 { - return nil, fmt.Errorf("invalid conntrackstat entry, missing fields") - } - entry := &ConntrackStatEntry{} - - entries, err := parseConntrackStatField(fields[0]) - if err != nil { - return nil, err - } - entry.Entries = entries - - found, err := parseConntrackStatField(fields[2]) - if err != nil { - return nil, err - } - entry.Found = found - - invalid, err := parseConntrackStatField(fields[4]) - if err != nil { - return nil, err - } - entry.Invalid = invalid - - ignore, err := parseConntrackStatField(fields[5]) - if err != nil { - return nil, err - } - entry.Ignore = ignore - - insert, err := parseConntrackStatField(fields[8]) - if err != nil { - return nil, err - } - entry.Insert = insert - - insertFailed, err := parseConntrackStatField(fields[9]) - if err != nil { - return nil, err - } - entry.InsertFailed = insertFailed - - drop, err := parseConntrackStatField(fields[10]) - if err != nil { - return nil, err - } - entry.Drop = drop - - earlyDrop, err := parseConntrackStatField(fields[11]) - if err != nil { - return nil, err - } - entry.EarlyDrop = earlyDrop - - searchRestart, err := parseConntrackStatField(fields[16]) - if err != nil { - return nil, err - } - entry.SearchRestart = searchRestart - - return entry, nil -} - -// Parses a uint64 from given hex in string. 
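// Illustrative usage sketch (helper name is hypothetical) for FS.ConntrackStat defined above,
// assuming imports of "fmt" and "github.com/prometheus/procfs" and the procfs.NewFS
// constructor (not shown in this diff). One ConntrackStatEntry is returned per CPU core.
func printConntrack() error {
	fs, err := procfs.NewFS("/proc") // assumed constructor, not part of this file
	if err != nil {
		return err
	}
	entries, err := fs.ConntrackStat()
	if err != nil {
		return err
	}
	for cpu, e := range entries {
		fmt.Printf("cpu%d: entries=%d found=%d drop=%d\n", cpu, e.Entries, e.Found, e.Drop)
	}
	return nil
}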
-func parseConntrackStatField(field string) (uint64, error) { - val, err := strconv.ParseUint(field, 16, 64) - if err != nil { - return 0, fmt.Errorf("couldn't parse %q field: %w", field, err) - } - return val, err -} diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go deleted file mode 100644 index e66208aa0..000000000 --- a/vendor/github.com/prometheus/procfs/net_dev.go +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "errors" - "os" - "sort" - "strconv" - "strings" -) - -// NetDevLine is single line parsed from /proc/net/dev or /proc/[pid]/net/dev. -type NetDevLine struct { - Name string `json:"name"` // The name of the interface. - RxBytes uint64 `json:"rx_bytes"` // Cumulative count of bytes received. - RxPackets uint64 `json:"rx_packets"` // Cumulative count of packets received. - RxErrors uint64 `json:"rx_errors"` // Cumulative count of receive errors encountered. - RxDropped uint64 `json:"rx_dropped"` // Cumulative count of packets dropped while receiving. - RxFIFO uint64 `json:"rx_fifo"` // Cumulative count of FIFO buffer errors. - RxFrame uint64 `json:"rx_frame"` // Cumulative count of packet framing errors. - RxCompressed uint64 `json:"rx_compressed"` // Cumulative count of compressed packets received by the device driver. - RxMulticast uint64 `json:"rx_multicast"` // Cumulative count of multicast frames received by the device driver. - TxBytes uint64 `json:"tx_bytes"` // Cumulative count of bytes transmitted. - TxPackets uint64 `json:"tx_packets"` // Cumulative count of packets transmitted. - TxErrors uint64 `json:"tx_errors"` // Cumulative count of transmit errors encountered. - TxDropped uint64 `json:"tx_dropped"` // Cumulative count of packets dropped while transmitting. - TxFIFO uint64 `json:"tx_fifo"` // Cumulative count of FIFO buffer errors. - TxCollisions uint64 `json:"tx_collisions"` // Cumulative count of collisions detected on the interface. - TxCarrier uint64 `json:"tx_carrier"` // Cumulative count of carrier losses detected by the device driver. - TxCompressed uint64 `json:"tx_compressed"` // Cumulative count of compressed packets transmitted by the device driver. -} - -// NetDev is parsed from /proc/net/dev or /proc/[pid]/net/dev. The map keys -// are interface names. -type NetDev map[string]NetDevLine - -// NetDev returns kernel/system statistics read from /proc/net/dev. -func (fs FS) NetDev() (NetDev, error) { - return newNetDev(fs.proc.Path("net/dev")) -} - -// NetDev returns kernel/system statistics read from /proc/[pid]/net/dev. -func (p Proc) NetDev() (NetDev, error) { - return newNetDev(p.path("net/dev")) -} - -// newNetDev creates a new NetDev from the contents of the given file. 
-func newNetDev(file string) (NetDev, error) { - f, err := os.Open(file) - if err != nil { - return NetDev{}, err - } - defer f.Close() - - netDev := NetDev{} - s := bufio.NewScanner(f) - for n := 0; s.Scan(); n++ { - // Skip the 2 header lines. - if n < 2 { - continue - } - - line, err := netDev.parseLine(s.Text()) - if err != nil { - return netDev, err - } - - netDev[line.Name] = *line - } - - return netDev, s.Err() -} - -// parseLine parses a single line from the /proc/net/dev file. Header lines -// must be filtered prior to calling this method. -func (netDev NetDev) parseLine(rawLine string) (*NetDevLine, error) { - idx := strings.LastIndex(rawLine, ":") - if idx == -1 { - return nil, errors.New("invalid net/dev line, missing colon") - } - fields := strings.Fields(strings.TrimSpace(rawLine[idx+1:])) - - var err error - line := &NetDevLine{} - - // Interface Name - line.Name = strings.TrimSpace(rawLine[:idx]) - if line.Name == "" { - return nil, errors.New("invalid net/dev line, empty interface name") - } - - // RX - line.RxBytes, err = strconv.ParseUint(fields[0], 10, 64) - if err != nil { - return nil, err - } - line.RxPackets, err = strconv.ParseUint(fields[1], 10, 64) - if err != nil { - return nil, err - } - line.RxErrors, err = strconv.ParseUint(fields[2], 10, 64) - if err != nil { - return nil, err - } - line.RxDropped, err = strconv.ParseUint(fields[3], 10, 64) - if err != nil { - return nil, err - } - line.RxFIFO, err = strconv.ParseUint(fields[4], 10, 64) - if err != nil { - return nil, err - } - line.RxFrame, err = strconv.ParseUint(fields[5], 10, 64) - if err != nil { - return nil, err - } - line.RxCompressed, err = strconv.ParseUint(fields[6], 10, 64) - if err != nil { - return nil, err - } - line.RxMulticast, err = strconv.ParseUint(fields[7], 10, 64) - if err != nil { - return nil, err - } - - // TX - line.TxBytes, err = strconv.ParseUint(fields[8], 10, 64) - if err != nil { - return nil, err - } - line.TxPackets, err = strconv.ParseUint(fields[9], 10, 64) - if err != nil { - return nil, err - } - line.TxErrors, err = strconv.ParseUint(fields[10], 10, 64) - if err != nil { - return nil, err - } - line.TxDropped, err = strconv.ParseUint(fields[11], 10, 64) - if err != nil { - return nil, err - } - line.TxFIFO, err = strconv.ParseUint(fields[12], 10, 64) - if err != nil { - return nil, err - } - line.TxCollisions, err = strconv.ParseUint(fields[13], 10, 64) - if err != nil { - return nil, err - } - line.TxCarrier, err = strconv.ParseUint(fields[14], 10, 64) - if err != nil { - return nil, err - } - line.TxCompressed, err = strconv.ParseUint(fields[15], 10, 64) - if err != nil { - return nil, err - } - - return line, nil -} - -// Total aggregates the values across interfaces and returns a new NetDevLine. -// The Name field will be a sorted comma separated list of interface names. 
-func (netDev NetDev) Total() NetDevLine { - total := NetDevLine{} - - names := make([]string, 0, len(netDev)) - for _, ifc := range netDev { - names = append(names, ifc.Name) - total.RxBytes += ifc.RxBytes - total.RxPackets += ifc.RxPackets - total.RxErrors += ifc.RxErrors - total.RxDropped += ifc.RxDropped - total.RxFIFO += ifc.RxFIFO - total.RxFrame += ifc.RxFrame - total.RxCompressed += ifc.RxCompressed - total.RxMulticast += ifc.RxMulticast - total.TxBytes += ifc.TxBytes - total.TxPackets += ifc.TxPackets - total.TxErrors += ifc.TxErrors - total.TxDropped += ifc.TxDropped - total.TxFIFO += ifc.TxFIFO - total.TxCollisions += ifc.TxCollisions - total.TxCarrier += ifc.TxCarrier - total.TxCompressed += ifc.TxCompressed - } - sort.Strings(names) - total.Name = strings.Join(names, ", ") - - return total -} diff --git a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go deleted file mode 100644 index 7fd57d7f4..000000000 --- a/vendor/github.com/prometheus/procfs/net_ip_socket.go +++ /dev/null @@ -1,226 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "encoding/hex" - "fmt" - "io" - "net" - "os" - "strconv" - "strings" -) - -const ( - // readLimit is used by io.LimitReader while reading the content of the - // /proc/net/udp{,6} files. The number of lines inside such a file is dynamic - // as each line represents a single used socket. - // In theory, the number of available sockets is 65535 (2^16 - 1) per IP. - // With e.g. 150 Byte per line and the maximum number of 65535, - // the reader needs to handle 150 Byte * 65535 =~ 10 MB for a single IP. - readLimit = 4294967296 // Byte -> 4 GiB -) - -// This contains generic data structures for both udp and tcp sockets. -type ( - // NetIPSocket represents the contents of /proc/net/{t,u}dp{,6} file without the header. - NetIPSocket []*netIPSocketLine - - // NetIPSocketSummary provides already computed values like the total queue lengths or - // the total number of used sockets. In contrast to NetIPSocket it does not collect - // the parsed lines into a slice. - NetIPSocketSummary struct { - // TxQueueLength shows the total queue length of all parsed tx_queue lengths. - TxQueueLength uint64 - // RxQueueLength shows the total queue length of all parsed rx_queue lengths. - RxQueueLength uint64 - // UsedSockets shows the total number of parsed lines representing the - // number of used sockets. - UsedSockets uint64 - } - - // netIPSocketLine represents the fields parsed from a single line - // in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped. - // For the proc file format details, see https://linux.die.net/man/5/proc. 
- netIPSocketLine struct { - Sl uint64 - LocalAddr net.IP - LocalPort uint64 - RemAddr net.IP - RemPort uint64 - St uint64 - TxQueue uint64 - RxQueue uint64 - UID uint64 - Inode uint64 - } -) - -func newNetIPSocket(file string) (NetIPSocket, error) { - f, err := os.Open(file) - if err != nil { - return nil, err - } - defer f.Close() - - var netIPSocket NetIPSocket - - lr := io.LimitReader(f, readLimit) - s := bufio.NewScanner(lr) - s.Scan() // skip first line with headers - for s.Scan() { - fields := strings.Fields(s.Text()) - line, err := parseNetIPSocketLine(fields) - if err != nil { - return nil, err - } - netIPSocket = append(netIPSocket, line) - } - if err := s.Err(); err != nil { - return nil, err - } - return netIPSocket, nil -} - -// newNetIPSocketSummary creates a new NetIPSocket{,6} from the contents of the given file. -func newNetIPSocketSummary(file string) (*NetIPSocketSummary, error) { - f, err := os.Open(file) - if err != nil { - return nil, err - } - defer f.Close() - - var netIPSocketSummary NetIPSocketSummary - - lr := io.LimitReader(f, readLimit) - s := bufio.NewScanner(lr) - s.Scan() // skip first line with headers - for s.Scan() { - fields := strings.Fields(s.Text()) - line, err := parseNetIPSocketLine(fields) - if err != nil { - return nil, err - } - netIPSocketSummary.TxQueueLength += line.TxQueue - netIPSocketSummary.RxQueueLength += line.RxQueue - netIPSocketSummary.UsedSockets++ - } - if err := s.Err(); err != nil { - return nil, err - } - return &netIPSocketSummary, nil -} - -// the /proc/net/{t,u}dp{,6} files are network byte order for ipv4 and for ipv6 the address is four words consisting of four bytes each. In each of those four words the four bytes are written in reverse order. - -func parseIP(hexIP string) (net.IP, error) { - var byteIP []byte - byteIP, err := hex.DecodeString(hexIP) - if err != nil { - return nil, fmt.Errorf("cannot parse address field in socket line %q", hexIP) - } - switch len(byteIP) { - case 4: - return net.IP{byteIP[3], byteIP[2], byteIP[1], byteIP[0]}, nil - case 16: - i := net.IP{ - byteIP[3], byteIP[2], byteIP[1], byteIP[0], - byteIP[7], byteIP[6], byteIP[5], byteIP[4], - byteIP[11], byteIP[10], byteIP[9], byteIP[8], - byteIP[15], byteIP[14], byteIP[13], byteIP[12], - } - return i, nil - default: - return nil, fmt.Errorf("Unable to parse IP %s", hexIP) - } -} - -// parseNetIPSocketLine parses a single line, represented by a list of fields. 
-func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) { - line := &netIPSocketLine{} - if len(fields) < 10 { - return nil, fmt.Errorf( - "cannot parse net socket line as it has less then 10 columns %q", - strings.Join(fields, " "), - ) - } - var err error // parse error - - // sl - s := strings.Split(fields[0], ":") - if len(s) != 2 { - return nil, fmt.Errorf("cannot parse sl field in socket line %q", fields[0]) - } - - if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil { - return nil, fmt.Errorf("cannot parse sl value in socket line: %w", err) - } - // local_address - l := strings.Split(fields[1], ":") - if len(l) != 2 { - return nil, fmt.Errorf("cannot parse local_address field in socket line %q", fields[1]) - } - if line.LocalAddr, err = parseIP(l[0]); err != nil { - return nil, err - } - if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil { - return nil, fmt.Errorf("cannot parse local_address port value in socket line: %w", err) - } - - // remote_address - r := strings.Split(fields[2], ":") - if len(r) != 2 { - return nil, fmt.Errorf("cannot parse rem_address field in socket line %q", fields[1]) - } - if line.RemAddr, err = parseIP(r[0]); err != nil { - return nil, err - } - if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil { - return nil, fmt.Errorf("cannot parse rem_address port value in socket line: %w", err) - } - - // st - if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil { - return nil, fmt.Errorf("cannot parse st value in socket line: %w", err) - } - - // tx_queue and rx_queue - q := strings.Split(fields[4], ":") - if len(q) != 2 { - return nil, fmt.Errorf( - "cannot parse tx/rx queues in socket line as it has a missing colon %q", - fields[4], - ) - } - if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil { - return nil, fmt.Errorf("cannot parse tx_queue value in socket line: %w", err) - } - if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil { - return nil, fmt.Errorf("cannot parse rx_queue value in socket line: %w", err) - } - - // uid - if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil { - return nil, fmt.Errorf("cannot parse uid value in socket line: %w", err) - } - - // inode - if line.Inode, err = strconv.ParseUint(fields[9], 0, 64); err != nil { - return nil, fmt.Errorf("cannot parse inode value in socket line: %w", err) - } - - return line, nil -} diff --git a/vendor/github.com/prometheus/procfs/net_protocols.go b/vendor/github.com/prometheus/procfs/net_protocols.go deleted file mode 100644 index 374b6f73f..000000000 --- a/vendor/github.com/prometheus/procfs/net_protocols.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// NetProtocolStats stores the contents from /proc/net/protocols. 
-type NetProtocolStats map[string]NetProtocolStatLine - -// NetProtocolStatLine contains a single line parsed from /proc/net/protocols. We -// only care about the first six columns as the rest are not likely to change -// and only serve to provide a set of capabilities for each protocol. -type NetProtocolStatLine struct { - Name string // 0 The name of the protocol - Size uint64 // 1 The size, in bytes, of a given protocol structure. e.g. sizeof(struct tcp_sock) or sizeof(struct unix_sock) - Sockets int64 // 2 Number of sockets in use by this protocol - Memory int64 // 3 Number of 4KB pages allocated by all sockets of this protocol - Pressure int // 4 This is either yes, no, or NI (not implemented). For the sake of simplicity we treat NI as not experiencing memory pressure. - MaxHeader uint64 // 5 Protocol specific max header size - Slab bool // 6 Indicates whether or not memory is allocated from the SLAB - ModuleName string // 7 The name of the module that implemented this protocol or "kernel" if not from a module - Capabilities NetProtocolCapabilities -} - -// NetProtocolCapabilities contains a list of capabilities for each protocol. -type NetProtocolCapabilities struct { - Close bool // 8 - Connect bool // 9 - Disconnect bool // 10 - Accept bool // 11 - IoCtl bool // 12 - Init bool // 13 - Destroy bool // 14 - Shutdown bool // 15 - SetSockOpt bool // 16 - GetSockOpt bool // 17 - SendMsg bool // 18 - RecvMsg bool // 19 - SendPage bool // 20 - Bind bool // 21 - BacklogRcv bool // 22 - Hash bool // 23 - UnHash bool // 24 - GetPort bool // 25 - EnterMemoryPressure bool // 26 -} - -// NetProtocols reads stats from /proc/net/protocols and returns a map of -// PortocolStatLine entries. As of this writing no official Linux Documentation -// exists, however the source is fairly self-explanatory and the format seems -// stable since its introduction in 2.6.12-rc2 -// Linux 2.6.12-rc2 - https://elixir.bootlin.com/linux/v2.6.12-rc2/source/net/core/sock.c#L1452 -// Linux 5.10 - https://elixir.bootlin.com/linux/v5.10.4/source/net/core/sock.c#L3586 -func (fs FS) NetProtocols() (NetProtocolStats, error) { - data, err := util.ReadFileNoStat(fs.proc.Path("net/protocols")) - if err != nil { - return NetProtocolStats{}, err - } - return parseNetProtocols(bufio.NewScanner(bytes.NewReader(data))) -} - -func parseNetProtocols(s *bufio.Scanner) (NetProtocolStats, error) { - nps := NetProtocolStats{} - - // Skip the header line - s.Scan() - - for s.Scan() { - line, err := nps.parseLine(s.Text()) - if err != nil { - return NetProtocolStats{}, err - } - - nps[line.Name] = *line - } - return nps, nil -} - -func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, error) { - line := &NetProtocolStatLine{Capabilities: NetProtocolCapabilities{}} - var err error - const enabled = "yes" - const disabled = "no" - - fields := strings.Fields(rawLine) - line.Name = fields[0] - line.Size, err = strconv.ParseUint(fields[1], 10, 64) - if err != nil { - return nil, err - } - line.Sockets, err = strconv.ParseInt(fields[2], 10, 64) - if err != nil { - return nil, err - } - line.Memory, err = strconv.ParseInt(fields[3], 10, 64) - if err != nil { - return nil, err - } - if fields[4] == enabled { - line.Pressure = 1 - } else if fields[4] == disabled { - line.Pressure = 0 - } else { - line.Pressure = -1 - } - line.MaxHeader, err = strconv.ParseUint(fields[5], 10, 64) - if err != nil { - return nil, err - } - if fields[6] == enabled { - line.Slab = true - } else if fields[6] == disabled { - line.Slab = false - } 
else { - return nil, fmt.Errorf("unable to parse capability for protocol: %s", line.Name) - } - line.ModuleName = fields[7] - - err = line.Capabilities.parseCapabilities(fields[8:]) - if err != nil { - return nil, err - } - - return line, nil -} - -func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) error { - // The capabilities are all bools so we can loop over to map them - capabilityFields := [...]*bool{ - &pc.Close, - &pc.Connect, - &pc.Disconnect, - &pc.Accept, - &pc.IoCtl, - &pc.Init, - &pc.Destroy, - &pc.Shutdown, - &pc.SetSockOpt, - &pc.GetSockOpt, - &pc.SendMsg, - &pc.RecvMsg, - &pc.SendPage, - &pc.Bind, - &pc.BacklogRcv, - &pc.Hash, - &pc.UnHash, - &pc.GetPort, - &pc.EnterMemoryPressure, - } - - for i := 0; i < len(capabilities); i++ { - if capabilities[i] == "y" { - *capabilityFields[i] = true - } else if capabilities[i] == "n" { - *capabilityFields[i] = false - } else { - return fmt.Errorf("unable to parse capability block for protocol: position %d", i) - } - } - return nil -} diff --git a/vendor/github.com/prometheus/procfs/net_sockstat.go b/vendor/github.com/prometheus/procfs/net_sockstat.go deleted file mode 100644 index e36f4872d..000000000 --- a/vendor/github.com/prometheus/procfs/net_sockstat.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "io" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// A NetSockstat contains the output of /proc/net/sockstat{,6} for IPv4 or IPv6, -// respectively. -type NetSockstat struct { - // Used is non-nil for IPv4 sockstat results, but nil for IPv6. - Used *int - Protocols []NetSockstatProtocol -} - -// A NetSockstatProtocol contains statistics about a given socket protocol. -// Pointer fields indicate that the value may or may not be present on any -// given protocol. -type NetSockstatProtocol struct { - Protocol string - InUse int - Orphan *int - TW *int - Alloc *int - Mem *int - Memory *int -} - -// NetSockstat retrieves IPv4 socket statistics. -func (fs FS) NetSockstat() (*NetSockstat, error) { - return readSockstat(fs.proc.Path("net", "sockstat")) -} - -// NetSockstat6 retrieves IPv6 socket statistics. -// -// If IPv6 is disabled on this kernel, the returned error can be checked with -// os.IsNotExist. -func (fs FS) NetSockstat6() (*NetSockstat, error) { - return readSockstat(fs.proc.Path("net", "sockstat6")) -} - -// readSockstat opens and parses a NetSockstat from the input file. -func readSockstat(name string) (*NetSockstat, error) { - // This file is small and can be read with one syscall. - b, err := util.ReadFileNoStat(name) - if err != nil { - // Do not wrap this error so the caller can detect os.IsNotExist and - // similar conditions. 
- return nil, err - } - - stat, err := parseSockstat(bytes.NewReader(b)) - if err != nil { - return nil, fmt.Errorf("failed to read sockstats from %q: %w", name, err) - } - - return stat, nil -} - -// parseSockstat reads the contents of a sockstat file and parses a NetSockstat. -func parseSockstat(r io.Reader) (*NetSockstat, error) { - var stat NetSockstat - s := bufio.NewScanner(r) - for s.Scan() { - // Expect a minimum of a protocol and one key/value pair. - fields := strings.Split(s.Text(), " ") - if len(fields) < 3 { - return nil, fmt.Errorf("malformed sockstat line: %q", s.Text()) - } - - // The remaining fields are key/value pairs. - kvs, err := parseSockstatKVs(fields[1:]) - if err != nil { - return nil, fmt.Errorf("error parsing sockstat key/value pairs from %q: %w", s.Text(), err) - } - - // The first field is the protocol. We must trim its colon suffix. - proto := strings.TrimSuffix(fields[0], ":") - switch proto { - case "sockets": - // Special case: IPv4 has a sockets "used" key/value pair that we - // embed at the top level of the structure. - used := kvs["used"] - stat.Used = &used - default: - // Parse all other lines as individual protocols. - nsp := parseSockstatProtocol(kvs) - nsp.Protocol = proto - stat.Protocols = append(stat.Protocols, nsp) - } - } - - if err := s.Err(); err != nil { - return nil, err - } - - return &stat, nil -} - -// parseSockstatKVs parses a string slice into a map of key/value pairs. -func parseSockstatKVs(kvs []string) (map[string]int, error) { - if len(kvs)%2 != 0 { - return nil, errors.New("odd number of fields in key/value pairs") - } - - // Iterate two values at a time to gather key/value pairs. - out := make(map[string]int, len(kvs)/2) - for i := 0; i < len(kvs); i += 2 { - vp := util.NewValueParser(kvs[i+1]) - out[kvs[i]] = vp.Int() - - if err := vp.Err(); err != nil { - return nil, err - } - } - - return out, nil -} - -// parseSockstatProtocol parses a NetSockstatProtocol from the input kvs map. -func parseSockstatProtocol(kvs map[string]int) NetSockstatProtocol { - var nsp NetSockstatProtocol - for k, v := range kvs { - // Capture the range variable to ensure we get unique pointers for - // each of the optional fields. - v := v - switch k { - case "inuse": - nsp.InUse = v - case "orphan": - nsp.Orphan = &v - case "tw": - nsp.TW = &v - case "alloc": - nsp.Alloc = &v - case "mem": - nsp.Mem = &v - case "memory": - nsp.Memory = &v - } - } - - return nsp -} diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go deleted file mode 100644 index 06b7b8f21..000000000 --- a/vendor/github.com/prometheus/procfs/net_softnet.go +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// For the proc file format details, -// See: -// * Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2343 -// * Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086 -// * Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162 -// * Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169 - -// SoftnetStat contains a single row of data from /proc/net/softnet_stat. -type SoftnetStat struct { - // Number of processed packets. - Processed uint32 - // Number of dropped packets. - Dropped uint32 - // Number of times processing packets ran out of quota. - TimeSqueezed uint32 - // Number of collision occur while obtaining device lock while transmitting. - CPUCollision uint32 - // Number of times cpu woken up received_rps. - ReceivedRps uint32 - // number of times flow limit has been reached. - FlowLimitCount uint32 - // Softnet backlog status. - SoftnetBacklogLen uint32 - // CPU id owning this softnet_data. - Index uint32 - // softnet_data's Width. - Width int -} - -var softNetProcFile = "net/softnet_stat" - -// NetSoftnetStat reads data from /proc/net/softnet_stat. -func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) { - b, err := util.ReadFileNoStat(fs.proc.Path(softNetProcFile)) - if err != nil { - return nil, err - } - - entries, err := parseSoftnet(bytes.NewReader(b)) - if err != nil { - return nil, fmt.Errorf("failed to parse /proc/net/softnet_stat: %w", err) - } - - return entries, nil -} - -func parseSoftnet(r io.Reader) ([]SoftnetStat, error) { - const minColumns = 9 - - s := bufio.NewScanner(r) - - var stats []SoftnetStat - for s.Scan() { - columns := strings.Fields(s.Text()) - width := len(columns) - softnetStat := SoftnetStat{} - - if width < minColumns { - return nil, fmt.Errorf("%d columns were detected, but at least %d were expected", width, minColumns) - } - - // Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2347 - if width >= minColumns { - us, err := parseHexUint32s(columns[0:9]) - if err != nil { - return nil, err - } - - softnetStat.Processed = us[0] - softnetStat.Dropped = us[1] - softnetStat.TimeSqueezed = us[2] - softnetStat.CPUCollision = us[8] - } - - // Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086 - if width >= 10 { - us, err := parseHexUint32s(columns[9:10]) - if err != nil { - return nil, err - } - - softnetStat.ReceivedRps = us[0] - } - - // Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162 - if width >= 11 { - us, err := parseHexUint32s(columns[10:11]) - if err != nil { - return nil, err - } - - softnetStat.FlowLimitCount = us[0] - } - - // Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169 - if width >= 13 { - us, err := parseHexUint32s(columns[11:13]) - if err != nil { - return nil, err - } - - softnetStat.SoftnetBacklogLen = us[0] - softnetStat.Index = us[1] - } - softnetStat.Width = width - stats = append(stats, softnetStat) - } - - return stats, nil -} - -func parseHexUint32s(ss []string) ([]uint32, error) { - us := make([]uint32, 0, len(ss)) - for _, s := range ss { - u, err := strconv.ParseUint(s, 16, 32) - if err != nil { - return nil, err - } - - us = append(us, uint32(u)) - } - - return us, nil -} diff --git a/vendor/github.com/prometheus/procfs/net_tcp.go 
b/vendor/github.com/prometheus/procfs/net_tcp.go deleted file mode 100644 index 527762955..000000000 --- a/vendor/github.com/prometheus/procfs/net_tcp.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -type ( - // NetTCP represents the contents of /proc/net/tcp{,6} file without the header. - NetTCP []*netIPSocketLine - - // NetTCPSummary provides already computed values like the total queue lengths or - // the total number of used sockets. In contrast to NetTCP it does not collect - // the parsed lines into a slice. - NetTCPSummary NetIPSocketSummary -) - -// NetTCP returns the IPv4 kernel/networking statistics for TCP datagrams -// read from /proc/net/tcp. -func (fs FS) NetTCP() (NetTCP, error) { - return newNetTCP(fs.proc.Path("net/tcp")) -} - -// NetTCP6 returns the IPv6 kernel/networking statistics for TCP datagrams -// read from /proc/net/tcp6. -func (fs FS) NetTCP6() (NetTCP, error) { - return newNetTCP(fs.proc.Path("net/tcp6")) -} - -// NetTCPSummary returns already computed statistics like the total queue lengths -// for TCP datagrams read from /proc/net/tcp. -func (fs FS) NetTCPSummary() (*NetTCPSummary, error) { - return newNetTCPSummary(fs.proc.Path("net/tcp")) -} - -// NetTCP6Summary returns already computed statistics like the total queue lengths -// for TCP datagrams read from /proc/net/tcp6. -func (fs FS) NetTCP6Summary() (*NetTCPSummary, error) { - return newNetTCPSummary(fs.proc.Path("net/tcp6")) -} - -// newNetTCP creates a new NetTCP{,6} from the contents of the given file. -func newNetTCP(file string) (NetTCP, error) { - n, err := newNetIPSocket(file) - n1 := NetTCP(n) - return n1, err -} - -func newNetTCPSummary(file string) (*NetTCPSummary, error) { - n, err := newNetIPSocketSummary(file) - if n == nil { - return nil, err - } - n1 := NetTCPSummary(*n) - return &n1, err -} diff --git a/vendor/github.com/prometheus/procfs/net_udp.go b/vendor/github.com/prometheus/procfs/net_udp.go deleted file mode 100644 index 9ac3daf2d..000000000 --- a/vendor/github.com/prometheus/procfs/net_udp.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -type ( - // NetUDP represents the contents of /proc/net/udp{,6} file without the header. - NetUDP []*netIPSocketLine - - // NetUDPSummary provides already computed values like the total queue lengths or - // the total number of used sockets. 
In contrast to NetUDP it does not collect - // the parsed lines into a slice. - NetUDPSummary NetIPSocketSummary -) - -// NetUDP returns the IPv4 kernel/networking statistics for UDP datagrams -// read from /proc/net/udp. -func (fs FS) NetUDP() (NetUDP, error) { - return newNetUDP(fs.proc.Path("net/udp")) -} - -// NetUDP6 returns the IPv6 kernel/networking statistics for UDP datagrams -// read from /proc/net/udp6. -func (fs FS) NetUDP6() (NetUDP, error) { - return newNetUDP(fs.proc.Path("net/udp6")) -} - -// NetUDPSummary returns already computed statistics like the total queue lengths -// for UDP datagrams read from /proc/net/udp. -func (fs FS) NetUDPSummary() (*NetUDPSummary, error) { - return newNetUDPSummary(fs.proc.Path("net/udp")) -} - -// NetUDP6Summary returns already computed statistics like the total queue lengths -// for UDP datagrams read from /proc/net/udp6. -func (fs FS) NetUDP6Summary() (*NetUDPSummary, error) { - return newNetUDPSummary(fs.proc.Path("net/udp6")) -} - -// newNetUDP creates a new NetUDP{,6} from the contents of the given file. -func newNetUDP(file string) (NetUDP, error) { - n, err := newNetIPSocket(file) - n1 := NetUDP(n) - return n1, err -} - -func newNetUDPSummary(file string) (*NetUDPSummary, error) { - n, err := newNetIPSocketSummary(file) - if n == nil { - return nil, err - } - n1 := NetUDPSummary(*n) - return &n1, err -} diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go deleted file mode 100644 index 98aa8e1c3..000000000 --- a/vendor/github.com/prometheus/procfs/net_unix.go +++ /dev/null @@ -1,257 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "fmt" - "io" - "os" - "strconv" - "strings" -) - -// For the proc file format details, -// see https://elixir.bootlin.com/linux/v4.17/source/net/unix/af_unix.c#L2815 -// and https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/net.h#L48. - -// Constants for the various /proc/net/unix enumerations. -// TODO: match against x/sys/unix or similar? -const ( - netUnixTypeStream = 1 - netUnixTypeDgram = 2 - netUnixTypeSeqpacket = 5 - - netUnixFlagDefault = 0 - netUnixFlagListen = 1 << 16 - - netUnixStateUnconnected = 1 - netUnixStateConnecting = 2 - netUnixStateConnected = 3 - netUnixStateDisconnected = 4 -) - -// NetUNIXType is the type of the type field. -type NetUNIXType uint64 - -// NetUNIXFlags is the type of the flags field. -type NetUNIXFlags uint64 - -// NetUNIXState is the type of the state field. -type NetUNIXState uint64 - -// NetUNIXLine represents a line of /proc/net/unix. -type NetUNIXLine struct { - KernelPtr string - RefCount uint64 - Protocol uint64 - Flags NetUNIXFlags - Type NetUNIXType - State NetUNIXState - Inode uint64 - Path string -} - -// NetUNIX holds the data read from /proc/net/unix. -type NetUNIX struct { - Rows []*NetUNIXLine -} - -// NetUNIX returns data read from /proc/net/unix. 
-func (fs FS) NetUNIX() (*NetUNIX, error) { - return readNetUNIX(fs.proc.Path("net/unix")) -} - -// readNetUNIX reads data in /proc/net/unix format from the specified file. -func readNetUNIX(file string) (*NetUNIX, error) { - // This file could be quite large and a streaming read is desirable versus - // reading the entire contents at once. - f, err := os.Open(file) - if err != nil { - return nil, err - } - defer f.Close() - - return parseNetUNIX(f) -} - -// parseNetUNIX creates a NetUnix structure from the incoming stream. -func parseNetUNIX(r io.Reader) (*NetUNIX, error) { - // Begin scanning by checking for the existence of Inode. - s := bufio.NewScanner(r) - s.Scan() - - // From the man page of proc(5), it does not contain an Inode field, - // but in actually it exists. This code works for both cases. - hasInode := strings.Contains(s.Text(), "Inode") - - // Expect a minimum number of fields, but Inode and Path are optional: - // Num RefCount Protocol Flags Type St Inode Path - minFields := 6 - if hasInode { - minFields++ - } - - var nu NetUNIX - for s.Scan() { - line := s.Text() - item, err := nu.parseLine(line, hasInode, minFields) - if err != nil { - return nil, fmt.Errorf("failed to parse /proc/net/unix data %q: %w", line, err) - } - - nu.Rows = append(nu.Rows, item) - } - - if err := s.Err(); err != nil { - return nil, fmt.Errorf("failed to scan /proc/net/unix data: %w", err) - } - - return &nu, nil -} - -func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) { - fields := strings.Fields(line) - - l := len(fields) - if l < min { - return nil, fmt.Errorf("expected at least %d fields but got %d", min, l) - } - - // Field offsets are as follows: - // Num RefCount Protocol Flags Type St Inode Path - - kernelPtr := strings.TrimSuffix(fields[0], ":") - - users, err := u.parseUsers(fields[1]) - if err != nil { - return nil, fmt.Errorf("failed to parse ref count %q: %w", fields[1], err) - } - - flags, err := u.parseFlags(fields[3]) - if err != nil { - return nil, fmt.Errorf("failed to parse flags %q: %w", fields[3], err) - } - - typ, err := u.parseType(fields[4]) - if err != nil { - return nil, fmt.Errorf("failed to parse type %q: %w", fields[4], err) - } - - state, err := u.parseState(fields[5]) - if err != nil { - return nil, fmt.Errorf("failed to parse state %q: %w", fields[5], err) - } - - var inode uint64 - if hasInode { - inode, err = u.parseInode(fields[6]) - if err != nil { - return nil, fmt.Errorf("failed to parse inode %q: %w", fields[6], err) - } - } - - n := &NetUNIXLine{ - KernelPtr: kernelPtr, - RefCount: users, - Type: typ, - Flags: flags, - State: state, - Inode: inode, - } - - // Path field is optional. - if l > min { - // Path occurs at either index 6 or 7 depending on whether inode is - // already present. 
- pathIdx := 7 - if !hasInode { - pathIdx-- - } - - n.Path = fields[pathIdx] - } - - return n, nil -} - -func (u NetUNIX) parseUsers(s string) (uint64, error) { - return strconv.ParseUint(s, 16, 32) -} - -func (u NetUNIX) parseType(s string) (NetUNIXType, error) { - typ, err := strconv.ParseUint(s, 16, 16) - if err != nil { - return 0, err - } - - return NetUNIXType(typ), nil -} - -func (u NetUNIX) parseFlags(s string) (NetUNIXFlags, error) { - flags, err := strconv.ParseUint(s, 16, 32) - if err != nil { - return 0, err - } - - return NetUNIXFlags(flags), nil -} - -func (u NetUNIX) parseState(s string) (NetUNIXState, error) { - st, err := strconv.ParseInt(s, 16, 8) - if err != nil { - return 0, err - } - - return NetUNIXState(st), nil -} - -func (u NetUNIX) parseInode(s string) (uint64, error) { - return strconv.ParseUint(s, 10, 64) -} - -func (t NetUNIXType) String() string { - switch t { - case netUnixTypeStream: - return "stream" - case netUnixTypeDgram: - return "dgram" - case netUnixTypeSeqpacket: - return "seqpacket" - } - return "unknown" -} - -func (f NetUNIXFlags) String() string { - switch f { - case netUnixFlagListen: - return "listen" - default: - return "default" - } -} - -func (s NetUNIXState) String() string { - switch s { - case netUnixStateUnconnected: - return "unconnected" - case netUnixStateConnecting: - return "connecting" - case netUnixStateConnected: - return "connected" - case netUnixStateDisconnected: - return "disconnected" - } - return "unknown" -} diff --git a/vendor/github.com/prometheus/procfs/net_xfrm.go b/vendor/github.com/prometheus/procfs/net_xfrm.go deleted file mode 100644 index f9d9d243d..000000000 --- a/vendor/github.com/prometheus/procfs/net_xfrm.go +++ /dev/null @@ -1,189 +0,0 @@ -// Copyright 2017 Prometheus Team -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "fmt" - "os" - "strconv" - "strings" -) - -// XfrmStat models the contents of /proc/net/xfrm_stat. -type XfrmStat struct { - // All errors which are not matched by other - XfrmInError int - // No buffer is left - XfrmInBufferError int - // Header Error - XfrmInHdrError int - // No state found - // i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong - XfrmInNoStates int - // Transformation protocol specific error - // e.g. SA Key is wrong - XfrmInStateProtoError int - // Transformation mode specific error - XfrmInStateModeError int - // Sequence error - // e.g. sequence number is out of window - XfrmInStateSeqError int - // State is expired - XfrmInStateExpired int - // State has mismatch option - // e.g. UDP encapsulation type is mismatched - XfrmInStateMismatch int - // State is invalid - XfrmInStateInvalid int - // No matching template for states - // e.g. Inbound SAs are correct but SP rule is wrong - XfrmInTmplMismatch int - // No policy is found for states - // e.g. 
Inbound SAs are correct but no SP is found - XfrmInNoPols int - // Policy discards - XfrmInPolBlock int - // Policy error - XfrmInPolError int - // All errors which are not matched by others - XfrmOutError int - // Bundle generation error - XfrmOutBundleGenError int - // Bundle check error - XfrmOutBundleCheckError int - // No state was found - XfrmOutNoStates int - // Transformation protocol specific error - XfrmOutStateProtoError int - // Transportation mode specific error - XfrmOutStateModeError int - // Sequence error - // i.e sequence number overflow - XfrmOutStateSeqError int - // State is expired - XfrmOutStateExpired int - // Policy discads - XfrmOutPolBlock int - // Policy is dead - XfrmOutPolDead int - // Policy Error - XfrmOutPolError int - // Forward routing of a packet is not allowed - XfrmFwdHdrError int - // State is invalid, perhaps expired - XfrmOutStateInvalid int - // State hasn’t been fully acquired before use - XfrmAcquireError int -} - -// NewXfrmStat reads the xfrm_stat statistics. -func NewXfrmStat() (XfrmStat, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return XfrmStat{}, err - } - - return fs.NewXfrmStat() -} - -// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem. -func (fs FS) NewXfrmStat() (XfrmStat, error) { - file, err := os.Open(fs.proc.Path("net/xfrm_stat")) - if err != nil { - return XfrmStat{}, err - } - defer file.Close() - - var ( - x = XfrmStat{} - s = bufio.NewScanner(file) - ) - - for s.Scan() { - fields := strings.Fields(s.Text()) - - if len(fields) != 2 { - return XfrmStat{}, fmt.Errorf("couldn't parse %q line %q", file.Name(), s.Text()) - } - - name := fields[0] - value, err := strconv.Atoi(fields[1]) - if err != nil { - return XfrmStat{}, err - } - - switch name { - case "XfrmInError": - x.XfrmInError = value - case "XfrmInBufferError": - x.XfrmInBufferError = value - case "XfrmInHdrError": - x.XfrmInHdrError = value - case "XfrmInNoStates": - x.XfrmInNoStates = value - case "XfrmInStateProtoError": - x.XfrmInStateProtoError = value - case "XfrmInStateModeError": - x.XfrmInStateModeError = value - case "XfrmInStateSeqError": - x.XfrmInStateSeqError = value - case "XfrmInStateExpired": - x.XfrmInStateExpired = value - case "XfrmInStateInvalid": - x.XfrmInStateInvalid = value - case "XfrmInTmplMismatch": - x.XfrmInTmplMismatch = value - case "XfrmInNoPols": - x.XfrmInNoPols = value - case "XfrmInPolBlock": - x.XfrmInPolBlock = value - case "XfrmInPolError": - x.XfrmInPolError = value - case "XfrmOutError": - x.XfrmOutError = value - case "XfrmInStateMismatch": - x.XfrmInStateMismatch = value - case "XfrmOutBundleGenError": - x.XfrmOutBundleGenError = value - case "XfrmOutBundleCheckError": - x.XfrmOutBundleCheckError = value - case "XfrmOutNoStates": - x.XfrmOutNoStates = value - case "XfrmOutStateProtoError": - x.XfrmOutStateProtoError = value - case "XfrmOutStateModeError": - x.XfrmOutStateModeError = value - case "XfrmOutStateSeqError": - x.XfrmOutStateSeqError = value - case "XfrmOutStateExpired": - x.XfrmOutStateExpired = value - case "XfrmOutPolBlock": - x.XfrmOutPolBlock = value - case "XfrmOutPolDead": - x.XfrmOutPolDead = value - case "XfrmOutPolError": - x.XfrmOutPolError = value - case "XfrmFwdHdrError": - x.XfrmFwdHdrError = value - case "XfrmOutStateInvalid": - x.XfrmOutStateInvalid = value - case "XfrmAcquireError": - x.XfrmAcquireError = value - } - - } - - return x, s.Err() -} diff --git a/vendor/github.com/prometheus/procfs/netstat.go b/vendor/github.com/prometheus/procfs/netstat.go 
deleted file mode 100644 index 5cc40aef5..000000000 --- a/vendor/github.com/prometheus/procfs/netstat.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "io" - "os" - "path/filepath" - "strconv" - "strings" -) - -// NetStat contains statistics for all the counters from one file. -type NetStat struct { - Stats map[string][]uint64 - Filename string -} - -// NetStat retrieves stats from `/proc/net/stat/`. -func (fs FS) NetStat() ([]NetStat, error) { - statFiles, err := filepath.Glob(fs.proc.Path("net/stat/*")) - if err != nil { - return nil, err - } - - var netStatsTotal []NetStat - - for _, filePath := range statFiles { - file, err := os.Open(filePath) - if err != nil { - return nil, err - } - - procNetstat, err := parseNetstat(file) - if err != nil { - return nil, err - } - procNetstat.Filename = filepath.Base(filePath) - - netStatsTotal = append(netStatsTotal, procNetstat) - } - return netStatsTotal, nil -} - -// parseNetstat parses the metrics from `/proc/net/stat/` file -// and returns a NetStat structure. -func parseNetstat(r io.Reader) (NetStat, error) { - var ( - scanner = bufio.NewScanner(r) - netStat = NetStat{ - Stats: make(map[string][]uint64), - } - ) - - scanner.Scan() - - // First string is always a header for stats - var headers []string - headers = append(headers, strings.Fields(scanner.Text())...) - - // Other strings represent per-CPU counters - for scanner.Scan() { - for num, counter := range strings.Fields(scanner.Text()) { - value, err := strconv.ParseUint(counter, 16, 64) - if err != nil { - return NetStat{}, err - } - netStat.Stats[headers[num]] = append(netStat.Stats[headers[num]], value) - } - } - - return netStat, nil -} diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go deleted file mode 100644 index c30223af7..000000000 --- a/vendor/github.com/prometheus/procfs/proc.go +++ /dev/null @@ -1,319 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bytes" - "fmt" - "io" - "os" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/fs" - "github.com/prometheus/procfs/internal/util" -) - -// Proc provides information about a running process. -type Proc struct { - // The process ID. - PID int - - fs fs.FS -} - -// Procs represents a list of Proc structs. 
-type Procs []Proc - -func (p Procs) Len() int { return len(p) } -func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } -func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID } - -// Self returns a process for the current process read via /proc/self. -func Self() (Proc, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return Proc{}, err - } - return fs.Self() -} - -// NewProc returns a process for the given pid under /proc. -func NewProc(pid int) (Proc, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return Proc{}, err - } - return fs.Proc(pid) -} - -// AllProcs returns a list of all currently available processes under /proc. -func AllProcs() (Procs, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return Procs{}, err - } - return fs.AllProcs() -} - -// Self returns a process for the current process. -func (fs FS) Self() (Proc, error) { - p, err := os.Readlink(fs.proc.Path("self")) - if err != nil { - return Proc{}, err - } - pid, err := strconv.Atoi(strings.Replace(p, string(fs.proc), "", -1)) - if err != nil { - return Proc{}, err - } - return fs.Proc(pid) -} - -// NewProc returns a process for the given pid. -// -// Deprecated: Use fs.Proc() instead. -func (fs FS) NewProc(pid int) (Proc, error) { - return fs.Proc(pid) -} - -// Proc returns a process for the given pid. -func (fs FS) Proc(pid int) (Proc, error) { - if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil { - return Proc{}, err - } - return Proc{PID: pid, fs: fs.proc}, nil -} - -// AllProcs returns a list of all currently available processes. -func (fs FS) AllProcs() (Procs, error) { - d, err := os.Open(fs.proc.Path()) - if err != nil { - return Procs{}, err - } - defer d.Close() - - names, err := d.Readdirnames(-1) - if err != nil { - return Procs{}, fmt.Errorf("could not read %q: %w", d.Name(), err) - } - - p := Procs{} - for _, n := range names { - pid, err := strconv.ParseInt(n, 10, 64) - if err != nil { - continue - } - p = append(p, Proc{PID: int(pid), fs: fs.proc}) - } - - return p, nil -} - -// CmdLine returns the command line of a process. -func (p Proc) CmdLine() ([]string, error) { - data, err := util.ReadFileNoStat(p.path("cmdline")) - if err != nil { - return nil, err - } - - if len(data) < 1 { - return []string{}, nil - } - - return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil -} - -// Wchan returns the wchan (wait channel) of a process. -func (p Proc) Wchan() (string, error) { - f, err := os.Open(p.path("wchan")) - if err != nil { - return "", err - } - defer f.Close() - - data, err := io.ReadAll(f) - if err != nil { - return "", err - } - - wchan := string(data) - if wchan == "" || wchan == "0" { - return "", nil - } - - return wchan, nil -} - -// Comm returns the command name of a process. -func (p Proc) Comm() (string, error) { - data, err := util.ReadFileNoStat(p.path("comm")) - if err != nil { - return "", err - } - - return strings.TrimSpace(string(data)), nil -} - -// Executable returns the absolute path of the executable command of a process. -func (p Proc) Executable() (string, error) { - exe, err := os.Readlink(p.path("exe")) - if os.IsNotExist(err) { - return "", nil - } - - return exe, err -} - -// Cwd returns the absolute path to the current working directory of the process. 
-func (p Proc) Cwd() (string, error) { - wd, err := os.Readlink(p.path("cwd")) - if os.IsNotExist(err) { - return "", nil - } - - return wd, err -} - -// RootDir returns the absolute path to the process's root directory (as set by chroot). -func (p Proc) RootDir() (string, error) { - rdir, err := os.Readlink(p.path("root")) - if os.IsNotExist(err) { - return "", nil - } - - return rdir, err -} - -// FileDescriptors returns the currently open file descriptors of a process. -func (p Proc) FileDescriptors() ([]uintptr, error) { - names, err := p.fileDescriptors() - if err != nil { - return nil, err - } - - fds := make([]uintptr, len(names)) - for i, n := range names { - fd, err := strconv.ParseInt(n, 10, 32) - if err != nil { - return nil, fmt.Errorf("could not parse fd %q: %w", n, err) - } - fds[i] = uintptr(fd) - } - - return fds, nil -} - -// FileDescriptorTargets returns the targets of all file descriptors of a process. -// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string. -func (p Proc) FileDescriptorTargets() ([]string, error) { - names, err := p.fileDescriptors() - if err != nil { - return nil, err - } - - targets := make([]string, len(names)) - - for i, name := range names { - target, err := os.Readlink(p.path("fd", name)) - if err == nil { - targets[i] = target - } - } - - return targets, nil -} - -// FileDescriptorsLen returns the number of currently open file descriptors of -// a process. -func (p Proc) FileDescriptorsLen() (int, error) { - fds, err := p.fileDescriptors() - if err != nil { - return 0, err - } - - return len(fds), nil -} - -// MountStats retrieves statistics and configuration for mount points in a -// process's namespace. -func (p Proc) MountStats() ([]*Mount, error) { - f, err := os.Open(p.path("mountstats")) - if err != nil { - return nil, err - } - defer f.Close() - - return parseMountStats(f) -} - -// MountInfo retrieves mount information for mount points in a -// process's namespace. -// It supplies information missing in `/proc/self/mounts` and -// fixes various other problems with that file too. -func (p Proc) MountInfo() ([]*MountInfo, error) { - data, err := util.ReadFileNoStat(p.path("mountinfo")) - if err != nil { - return nil, err - } - return parseMountInfo(data) -} - -func (p Proc) fileDescriptors() ([]string, error) { - d, err := os.Open(p.path("fd")) - if err != nil { - return nil, err - } - defer d.Close() - - names, err := d.Readdirnames(-1) - if err != nil { - return nil, fmt.Errorf("could not read %q: %w", d.Name(), err) - } - - return names, nil -} - -func (p Proc) path(pa ...string) string { - return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...) -} - -// FileDescriptorsInfo retrieves information about all file descriptors of -// the process. -func (p Proc) FileDescriptorsInfo() (ProcFDInfos, error) { - names, err := p.fileDescriptors() - if err != nil { - return nil, err - } - - var fdinfos ProcFDInfos - - for _, n := range names { - fdinfo, err := p.FDInfo(n) - if err != nil { - continue - } - fdinfos = append(fdinfos, *fdinfo) - } - - return fdinfos, nil -} - -// Schedstat returns task scheduling information for the process. 
-func (p Proc) Schedstat() (ProcSchedstat, error) { - contents, err := os.ReadFile(p.path("schedstat")) - if err != nil { - return ProcSchedstat{}, err - } - return parseProcSchedstat(string(contents)) -} diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go deleted file mode 100644 index ea83a75ff..000000000 --- a/vendor/github.com/prometheus/procfs/proc_cgroup.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a -// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource -// controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies -// contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in -// this hierarchy' (where==what path on the specific cgroupfs). By prefixing this path with the mount point of -// *this specific* hierarchy, you can locate the relevant pseudo-files needed to read/set the data for this PID -// in this hierarchy -// -// Also see http://man7.org/linux/man-pages/man7/cgroups.7.html -type Cgroup struct { - // HierarchyID that can be matched to a named hierarchy using /proc/cgroups. Cgroups V2 only has one - // hierarchy, so HierarchyID is always 0. For cgroups v1 this is a unique ID number - HierarchyID int - // Controllers using this hierarchy of processes. Controllers are also known as subsystems. For - // Cgroups V2 this may be empty, as all active controllers use the same hierarchy - Controllers []string - // Path of this control group, relative to the mount point of the cgroupfs representing this specific - // hierarchy - Path string -} - -// parseCgroupString parses each line of the /proc/[pid]/cgroup file -// Line format is hierarchyID:[controller1,controller2]:path. -func parseCgroupString(cgroupStr string) (*Cgroup, error) { - var err error - - fields := strings.SplitN(cgroupStr, ":", 3) - if len(fields) < 3 { - return nil, fmt.Errorf("at least 3 fields required, found %d fields in cgroup string: %s", len(fields), cgroupStr) - } - - cgroup := &Cgroup{ - Path: fields[2], - Controllers: nil, - } - cgroup.HierarchyID, err = strconv.Atoi(fields[0]) - if err != nil { - return nil, fmt.Errorf("failed to parse hierarchy ID") - } - if fields[1] != "" { - ssNames := strings.Split(fields[1], ",") - cgroup.Controllers = append(cgroup.Controllers, ssNames...) - } - return cgroup, nil -} - -// parseCgroups reads each line of the /proc/[pid]/cgroup file. 
-func parseCgroups(data []byte) ([]Cgroup, error) { - var cgroups []Cgroup - scanner := bufio.NewScanner(bytes.NewReader(data)) - for scanner.Scan() { - mountString := scanner.Text() - parsedMounts, err := parseCgroupString(mountString) - if err != nil { - return nil, err - } - cgroups = append(cgroups, *parsedMounts) - } - - err := scanner.Err() - return cgroups, err -} - -// Cgroups reads from /proc//cgroups and returns a []*Cgroup struct locating this PID in each process -// control hierarchy running on this system. On every system (v1 and v2), all hierarchies contain all processes, -// so the len of the returned struct is equal to the number of active hierarchies on this system. -func (p Proc) Cgroups() ([]Cgroup, error) { - data, err := util.ReadFileNoStat(p.path("cgroup")) - if err != nil { - return nil, err - } - return parseCgroups(data) -} diff --git a/vendor/github.com/prometheus/procfs/proc_cgroups.go b/vendor/github.com/prometheus/procfs/proc_cgroups.go deleted file mode 100644 index 24d4dce9c..000000000 --- a/vendor/github.com/prometheus/procfs/proc_cgroups.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2021 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// CgroupSummary models one line from /proc/cgroups. -// This file contains information about the controllers that are compiled into the kernel. -// -// Also see http://man7.org/linux/man-pages/man7/cgroups.7.html -type CgroupSummary struct { - // The name of the controller. controller is also known as subsystem. - SubsysName string - // The unique ID of the cgroup hierarchy on which this controller is mounted. - Hierarchy int - // The number of control groups in this hierarchy using this controller. - Cgroups int - // This field contains the value 1 if this controller is enabled, or 0 if it has been disabled - Enabled int -} - -// parseCgroupSummary parses each line of the /proc/cgroup file -// Line format is `subsys_name hierarchy num_cgroups enabled`. -func parseCgroupSummaryString(CgroupSummaryStr string) (*CgroupSummary, error) { - var err error - - fields := strings.Fields(CgroupSummaryStr) - // require at least 4 fields - if len(fields) < 4 { - return nil, fmt.Errorf("at least 4 fields required, found %d fields in cgroup info string: %s", len(fields), CgroupSummaryStr) - } - - CgroupSummary := &CgroupSummary{ - SubsysName: fields[0], - } - CgroupSummary.Hierarchy, err = strconv.Atoi(fields[1]) - if err != nil { - return nil, fmt.Errorf("failed to parse hierarchy ID") - } - CgroupSummary.Cgroups, err = strconv.Atoi(fields[2]) - if err != nil { - return nil, fmt.Errorf("failed to parse Cgroup Num") - } - CgroupSummary.Enabled, err = strconv.Atoi(fields[3]) - if err != nil { - return nil, fmt.Errorf("failed to parse Enabled") - } - return CgroupSummary, nil -} - -// parseCgroupSummary reads each line of the /proc/cgroup file. 
-func parseCgroupSummary(data []byte) ([]CgroupSummary, error) { - var CgroupSummarys []CgroupSummary - scanner := bufio.NewScanner(bytes.NewReader(data)) - for scanner.Scan() { - CgroupSummaryString := scanner.Text() - // ignore comment lines - if strings.HasPrefix(CgroupSummaryString, "#") { - continue - } - CgroupSummary, err := parseCgroupSummaryString(CgroupSummaryString) - if err != nil { - return nil, err - } - CgroupSummarys = append(CgroupSummarys, *CgroupSummary) - } - - err := scanner.Err() - return CgroupSummarys, err -} - -// CgroupSummarys returns information about current /proc/cgroups. -func (fs FS) CgroupSummarys() ([]CgroupSummary, error) { - data, err := util.ReadFileNoStat(fs.proc.Path("cgroups")) - if err != nil { - return nil, err - } - return parseCgroupSummary(data) -} diff --git a/vendor/github.com/prometheus/procfs/proc_environ.go b/vendor/github.com/prometheus/procfs/proc_environ.go deleted file mode 100644 index 57a89895d..000000000 --- a/vendor/github.com/prometheus/procfs/proc_environ.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// Environ reads process environments from `/proc//environ`. -func (p Proc) Environ() ([]string, error) { - environments := make([]string, 0) - - data, err := util.ReadFileNoStat(p.path("environ")) - if err != nil { - return environments, err - } - - environments = strings.Split(string(data), "\000") - if len(environments) > 0 { - environments = environments[:len(environments)-1] - } - - return environments, nil -} diff --git a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go deleted file mode 100644 index 1bbdd4a8e..000000000 --- a/vendor/github.com/prometheus/procfs/proc_fdinfo.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "regexp" - - "github.com/prometheus/procfs/internal/util" -) - -var ( - rPos = regexp.MustCompile(`^pos:\s+(\d+)$`) - rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`) - rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`) - rInotify = regexp.MustCompile(`^inotify`) - rInotifyParts = regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)(?:\s+mask:([0-9a-f]+))?`) -) - -// ProcFDInfo contains represents file descriptor information. -type ProcFDInfo struct { - // File descriptor - FD string - // File offset - Pos string - // File access mode and status flags - Flags string - // Mount point ID - MntID string - // List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only) - InotifyInfos []InotifyInfo -} - -// FDInfo constructor. On kernels older than 3.8, InotifyInfos will always be empty. -func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) { - data, err := util.ReadFileNoStat(p.path("fdinfo", fd)) - if err != nil { - return nil, err - } - - var text, pos, flags, mntid string - var inotify []InotifyInfo - - scanner := bufio.NewScanner(bytes.NewReader(data)) - for scanner.Scan() { - text = scanner.Text() - if rPos.MatchString(text) { - pos = rPos.FindStringSubmatch(text)[1] - } else if rFlags.MatchString(text) { - flags = rFlags.FindStringSubmatch(text)[1] - } else if rMntID.MatchString(text) { - mntid = rMntID.FindStringSubmatch(text)[1] - } else if rInotify.MatchString(text) { - newInotify, err := parseInotifyInfo(text) - if err != nil { - return nil, err - } - inotify = append(inotify, *newInotify) - } - } - - i := &ProcFDInfo{ - FD: fd, - Pos: pos, - Flags: flags, - MntID: mntid, - InotifyInfos: inotify, - } - - return i, nil -} - -// InotifyInfo represents a single inotify line in the fdinfo file. -type InotifyInfo struct { - // Watch descriptor number - WD string - // Inode number - Ino string - // Device ID - Sdev string - // Mask of events being monitored - Mask string -} - -// InotifyInfo constructor. Only available on kernel 3.8+. -func parseInotifyInfo(line string) (*InotifyInfo, error) { - m := rInotifyParts.FindStringSubmatch(line) - if len(m) >= 4 { - var mask string - if len(m) == 5 { - mask = m[4] - } - i := &InotifyInfo{ - WD: m[1], - Ino: m[2], - Sdev: m[3], - Mask: mask, - } - return i, nil - } - return nil, fmt.Errorf("invalid inode entry: %q", line) -} - -// ProcFDInfos represents a list of ProcFDInfo structs. -type ProcFDInfos []ProcFDInfo - -func (p ProcFDInfos) Len() int { return len(p) } -func (p ProcFDInfos) Swap(i, j int) { p[i], p[j] = p[j], p[i] } -func (p ProcFDInfos) Less(i, j int) bool { return p[i].FD < p[j].FD } - -// InotifyWatchLen returns the total number of inotify watches. -func (p ProcFDInfos) InotifyWatchLen() (int, error) { - length := 0 - for _, f := range p { - length += len(f.InotifyInfos) - } - - return length, nil -} diff --git a/vendor/github.com/prometheus/procfs/proc_interrupts.go b/vendor/github.com/prometheus/procfs/proc_interrupts.go deleted file mode 100644 index 9df79c237..000000000 --- a/vendor/github.com/prometheus/procfs/proc_interrupts.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2022 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "io" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// Interrupt represents a single interrupt line. -type Interrupt struct { - // Info is the type of interrupt. - Info string - // Devices is the name of the device that is located at that IRQ - Devices string - // Values is the number of interrupts per CPU. - Values []string -} - -// Interrupts models the content of /proc/interrupts. Key is the IRQ number. -// - https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/deployment_guide/s2-proc-interrupts -// - https://raspberrypi.stackexchange.com/questions/105802/explanation-of-proc-interrupts-output -type Interrupts map[string]Interrupt - -// Interrupts creates a new instance from a given Proc instance. -func (p Proc) Interrupts() (Interrupts, error) { - data, err := util.ReadFileNoStat(p.path("interrupts")) - if err != nil { - return nil, err - } - return parseInterrupts(bytes.NewReader(data)) -} - -func parseInterrupts(r io.Reader) (Interrupts, error) { - var ( - interrupts = Interrupts{} - scanner = bufio.NewScanner(r) - ) - - if !scanner.Scan() { - return nil, errors.New("interrupts empty") - } - cpuNum := len(strings.Fields(scanner.Text())) // one header per cpu - - for scanner.Scan() { - parts := strings.Fields(scanner.Text()) - if len(parts) == 0 { // skip empty lines - continue - } - if len(parts) < 2 { - return nil, fmt.Errorf("not enough fields in interrupts (expected at least 2 fields but got %d): %s", len(parts), parts) - } - intName := parts[0][:len(parts[0])-1] // remove trailing : - - if len(parts) == 2 { - interrupts[intName] = Interrupt{ - Info: "", - Devices: "", - Values: []string{ - parts[1], - }, - } - continue - } - - intr := Interrupt{ - Values: parts[1 : cpuNum+1], - } - - if _, err := strconv.Atoi(intName); err == nil { // numeral interrupt - intr.Info = parts[cpuNum+1] - intr.Devices = strings.Join(parts[cpuNum+2:], " ") - } else { - intr.Info = strings.Join(parts[cpuNum+1:], " ") - } - interrupts[intName] = intr - } - - return interrupts, scanner.Err() -} diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go deleted file mode 100644 index 776f34971..000000000 --- a/vendor/github.com/prometheus/procfs/proc_io.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "fmt" - - "github.com/prometheus/procfs/internal/util" -) - -// ProcIO models the content of /proc//io. 
-type ProcIO struct { - // Chars read. - RChar uint64 - // Chars written. - WChar uint64 - // Read syscalls. - SyscR uint64 - // Write syscalls. - SyscW uint64 - // Bytes read. - ReadBytes uint64 - // Bytes written. - WriteBytes uint64 - // Bytes written, but taking into account truncation. See - // Documentation/filesystems/proc.txt in the kernel sources for - // detailed explanation. - CancelledWriteBytes int64 -} - -// IO creates a new ProcIO instance from a given Proc instance. -func (p Proc) IO() (ProcIO, error) { - pio := ProcIO{} - - data, err := util.ReadFileNoStat(p.path("io")) - if err != nil { - return pio, err - } - - ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" + - "read_bytes: %d\nwrite_bytes: %d\n" + - "cancelled_write_bytes: %d\n" - - _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR, - &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes) - - return pio, err -} diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go deleted file mode 100644 index 7a1388185..000000000 --- a/vendor/github.com/prometheus/procfs/proc_limits.go +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "fmt" - "os" - "regexp" - "strconv" -) - -// ProcLimits represents the soft limits for each of the process's resource -// limits. For more information see getrlimit(2): -// http://man7.org/linux/man-pages/man2/getrlimit.2.html. -type ProcLimits struct { - // CPU time limit in seconds. - CPUTime uint64 - // Maximum size of files that the process may create. - FileSize uint64 - // Maximum size of the process's data segment (initialized data, - // uninitialized data, and heap). - DataSize uint64 - // Maximum size of the process stack in bytes. - StackSize uint64 - // Maximum size of a core file. - CoreFileSize uint64 - // Limit of the process's resident set in pages. - ResidentSet uint64 - // Maximum number of processes that can be created for the real user ID of - // the calling process. - Processes uint64 - // Value one greater than the maximum file descriptor number that can be - // opened by this process. - OpenFiles uint64 - // Maximum number of bytes of memory that may be locked into RAM. - LockedMemory uint64 - // Maximum size of the process's virtual memory address space in bytes. - AddressSpace uint64 - // Limit on the combined number of flock(2) locks and fcntl(2) leases that - // this process may establish. - FileLocks uint64 - // Limit of signals that may be queued for the real user ID of the calling - // process. - PendingSignals uint64 - // Limit on the number of bytes that can be allocated for POSIX message - // queues for the real user ID of the calling process. - MsqqueueSize uint64 - // Limit of the nice priority set using setpriority(2) or nice(2). 
- NicePriority uint64 - // Limit of the real-time priority set using sched_setscheduler(2) or - // sched_setparam(2). - RealtimePriority uint64 - // Limit (in microseconds) on the amount of CPU time that a process - // scheduled under a real-time scheduling policy may consume without making - // a blocking system call. - RealtimeTimeout uint64 -} - -const ( - limitsFields = 4 - limitsUnlimited = "unlimited" -) - -var ( - limitsMatch = regexp.MustCompile(`(Max \w+\s{0,1}?\w*\s{0,1}\w*)\s{2,}(\w+)\s+(\w+)`) -) - -// NewLimits returns the current soft limits of the process. -// -// Deprecated: Use p.Limits() instead. -func (p Proc) NewLimits() (ProcLimits, error) { - return p.Limits() -} - -// Limits returns the current soft limits of the process. -func (p Proc) Limits() (ProcLimits, error) { - f, err := os.Open(p.path("limits")) - if err != nil { - return ProcLimits{}, err - } - defer f.Close() - - var ( - l = ProcLimits{} - s = bufio.NewScanner(f) - ) - - s.Scan() // Skip limits header - - for s.Scan() { - //fields := limitsMatch.Split(s.Text(), limitsFields) - fields := limitsMatch.FindStringSubmatch(s.Text()) - if len(fields) != limitsFields { - return ProcLimits{}, fmt.Errorf("couldn't parse %q line %q", f.Name(), s.Text()) - } - - switch fields[1] { - case "Max cpu time": - l.CPUTime, err = parseUint(fields[2]) - case "Max file size": - l.FileSize, err = parseUint(fields[2]) - case "Max data size": - l.DataSize, err = parseUint(fields[2]) - case "Max stack size": - l.StackSize, err = parseUint(fields[2]) - case "Max core file size": - l.CoreFileSize, err = parseUint(fields[2]) - case "Max resident set": - l.ResidentSet, err = parseUint(fields[2]) - case "Max processes": - l.Processes, err = parseUint(fields[2]) - case "Max open files": - l.OpenFiles, err = parseUint(fields[2]) - case "Max locked memory": - l.LockedMemory, err = parseUint(fields[2]) - case "Max address space": - l.AddressSpace, err = parseUint(fields[2]) - case "Max file locks": - l.FileLocks, err = parseUint(fields[2]) - case "Max pending signals": - l.PendingSignals, err = parseUint(fields[2]) - case "Max msgqueue size": - l.MsqqueueSize, err = parseUint(fields[2]) - case "Max nice priority": - l.NicePriority, err = parseUint(fields[2]) - case "Max realtime priority": - l.RealtimePriority, err = parseUint(fields[2]) - case "Max realtime timeout": - l.RealtimeTimeout, err = parseUint(fields[2]) - } - if err != nil { - return ProcLimits{}, err - } - } - - return l, s.Err() -} - -func parseUint(s string) (uint64, error) { - if s == limitsUnlimited { - return 18446744073709551615, nil - } - i, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return 0, fmt.Errorf("couldn't parse value %q: %w", s, err) - } - return i, nil -} diff --git a/vendor/github.com/prometheus/procfs/proc_maps.go b/vendor/github.com/prometheus/procfs/proc_maps.go deleted file mode 100644 index f1bcbf32b..000000000 --- a/vendor/github.com/prometheus/procfs/proc_maps.go +++ /dev/null @@ -1,211 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris) && !js -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris -// +build !js - -package procfs - -import ( - "bufio" - "fmt" - "os" - "strconv" - "strings" - - "golang.org/x/sys/unix" -) - -// ProcMapPermissions contains permission settings read from `/proc/[pid]/maps`. -type ProcMapPermissions struct { - // mapping has the [R]ead flag set - Read bool - // mapping has the [W]rite flag set - Write bool - // mapping has the [X]ecutable flag set - Execute bool - // mapping has the [S]hared flag set - Shared bool - // mapping is marked as [P]rivate (copy on write) - Private bool -} - -// ProcMap contains the process memory-mappings of the process -// read from `/proc/[pid]/maps`. -type ProcMap struct { - // The start address of current mapping. - StartAddr uintptr - // The end address of the current mapping - EndAddr uintptr - // The permissions for this mapping - Perms *ProcMapPermissions - // The current offset into the file/fd (e.g., shared libs) - Offset int64 - // Device owner of this mapping (major:minor) in Mkdev format. - Dev uint64 - // The inode of the device above - Inode uint64 - // The file or psuedofile (or empty==anonymous) - Pathname string -} - -// parseDevice parses the device token of a line and converts it to a dev_t -// (mkdev) like structure. -func parseDevice(s string) (uint64, error) { - toks := strings.Split(s, ":") - if len(toks) < 2 { - return 0, fmt.Errorf("unexpected number of fields") - } - - major, err := strconv.ParseUint(toks[0], 16, 0) - if err != nil { - return 0, err - } - - minor, err := strconv.ParseUint(toks[1], 16, 0) - if err != nil { - return 0, err - } - - return unix.Mkdev(uint32(major), uint32(minor)), nil -} - -// parseAddress converts a hex-string to a uintptr. -func parseAddress(s string) (uintptr, error) { - a, err := strconv.ParseUint(s, 16, 0) - if err != nil { - return 0, err - } - - return uintptr(a), nil -} - -// parseAddresses parses the start-end address. -func parseAddresses(s string) (uintptr, uintptr, error) { - toks := strings.Split(s, "-") - if len(toks) < 2 { - return 0, 0, fmt.Errorf("invalid address") - } - - saddr, err := parseAddress(toks[0]) - if err != nil { - return 0, 0, err - } - - eaddr, err := parseAddress(toks[1]) - if err != nil { - return 0, 0, err - } - - return saddr, eaddr, nil -} - -// parsePermissions parses a token and returns any that are set. -func parsePermissions(s string) (*ProcMapPermissions, error) { - if len(s) < 4 { - return nil, fmt.Errorf("invalid permissions token") - } - - perms := ProcMapPermissions{} - for _, ch := range s { - switch ch { - case 'r': - perms.Read = true - case 'w': - perms.Write = true - case 'x': - perms.Execute = true - case 'p': - perms.Private = true - case 's': - perms.Shared = true - } - } - - return &perms, nil -} - -// parseProcMap will attempt to parse a single line within a proc/[pid]/maps -// buffer. 
-func parseProcMap(text string) (*ProcMap, error) { - fields := strings.Fields(text) - if len(fields) < 5 { - return nil, fmt.Errorf("truncated procmap entry") - } - - saddr, eaddr, err := parseAddresses(fields[0]) - if err != nil { - return nil, err - } - - perms, err := parsePermissions(fields[1]) - if err != nil { - return nil, err - } - - offset, err := strconv.ParseInt(fields[2], 16, 0) - if err != nil { - return nil, err - } - - device, err := parseDevice(fields[3]) - if err != nil { - return nil, err - } - - inode, err := strconv.ParseUint(fields[4], 10, 0) - if err != nil { - return nil, err - } - - pathname := "" - - if len(fields) >= 5 { - pathname = strings.Join(fields[5:], " ") - } - - return &ProcMap{ - StartAddr: saddr, - EndAddr: eaddr, - Perms: perms, - Offset: offset, - Dev: device, - Inode: inode, - Pathname: pathname, - }, nil -} - -// ProcMaps reads from /proc/[pid]/maps to get the memory-mappings of the -// process. -func (p Proc) ProcMaps() ([]*ProcMap, error) { - file, err := os.Open(p.path("maps")) - if err != nil { - return nil, err - } - defer file.Close() - - maps := []*ProcMap{} - scan := bufio.NewScanner(file) - - for scan.Scan() { - m, err := parseProcMap(scan.Text()) - if err != nil { - return nil, err - } - - maps = append(maps, m) - } - - return maps, nil -} diff --git a/vendor/github.com/prometheus/procfs/proc_netstat.go b/vendor/github.com/prometheus/procfs/proc_netstat.go deleted file mode 100644 index 6a43bb245..000000000 --- a/vendor/github.com/prometheus/procfs/proc_netstat.go +++ /dev/null @@ -1,443 +0,0 @@ -// Copyright 2022 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// ProcNetstat models the content of /proc//net/netstat. -type ProcNetstat struct { - // The process ID. 
- PID int - TcpExt - IpExt -} - -type TcpExt struct { // nolint:revive - SyncookiesSent *float64 - SyncookiesRecv *float64 - SyncookiesFailed *float64 - EmbryonicRsts *float64 - PruneCalled *float64 - RcvPruned *float64 - OfoPruned *float64 - OutOfWindowIcmps *float64 - LockDroppedIcmps *float64 - ArpFilter *float64 - TW *float64 - TWRecycled *float64 - TWKilled *float64 - PAWSActive *float64 - PAWSEstab *float64 - DelayedACKs *float64 - DelayedACKLocked *float64 - DelayedACKLost *float64 - ListenOverflows *float64 - ListenDrops *float64 - TCPHPHits *float64 - TCPPureAcks *float64 - TCPHPAcks *float64 - TCPRenoRecovery *float64 - TCPSackRecovery *float64 - TCPSACKReneging *float64 - TCPSACKReorder *float64 - TCPRenoReorder *float64 - TCPTSReorder *float64 - TCPFullUndo *float64 - TCPPartialUndo *float64 - TCPDSACKUndo *float64 - TCPLossUndo *float64 - TCPLostRetransmit *float64 - TCPRenoFailures *float64 - TCPSackFailures *float64 - TCPLossFailures *float64 - TCPFastRetrans *float64 - TCPSlowStartRetrans *float64 - TCPTimeouts *float64 - TCPLossProbes *float64 - TCPLossProbeRecovery *float64 - TCPRenoRecoveryFail *float64 - TCPSackRecoveryFail *float64 - TCPRcvCollapsed *float64 - TCPDSACKOldSent *float64 - TCPDSACKOfoSent *float64 - TCPDSACKRecv *float64 - TCPDSACKOfoRecv *float64 - TCPAbortOnData *float64 - TCPAbortOnClose *float64 - TCPAbortOnMemory *float64 - TCPAbortOnTimeout *float64 - TCPAbortOnLinger *float64 - TCPAbortFailed *float64 - TCPMemoryPressures *float64 - TCPMemoryPressuresChrono *float64 - TCPSACKDiscard *float64 - TCPDSACKIgnoredOld *float64 - TCPDSACKIgnoredNoUndo *float64 - TCPSpuriousRTOs *float64 - TCPMD5NotFound *float64 - TCPMD5Unexpected *float64 - TCPMD5Failure *float64 - TCPSackShifted *float64 - TCPSackMerged *float64 - TCPSackShiftFallback *float64 - TCPBacklogDrop *float64 - PFMemallocDrop *float64 - TCPMinTTLDrop *float64 - TCPDeferAcceptDrop *float64 - IPReversePathFilter *float64 - TCPTimeWaitOverflow *float64 - TCPReqQFullDoCookies *float64 - TCPReqQFullDrop *float64 - TCPRetransFail *float64 - TCPRcvCoalesce *float64 - TCPRcvQDrop *float64 - TCPOFOQueue *float64 - TCPOFODrop *float64 - TCPOFOMerge *float64 - TCPChallengeACK *float64 - TCPSYNChallenge *float64 - TCPFastOpenActive *float64 - TCPFastOpenActiveFail *float64 - TCPFastOpenPassive *float64 - TCPFastOpenPassiveFail *float64 - TCPFastOpenListenOverflow *float64 - TCPFastOpenCookieReqd *float64 - TCPFastOpenBlackhole *float64 - TCPSpuriousRtxHostQueues *float64 - BusyPollRxPackets *float64 - TCPAutoCorking *float64 - TCPFromZeroWindowAdv *float64 - TCPToZeroWindowAdv *float64 - TCPWantZeroWindowAdv *float64 - TCPSynRetrans *float64 - TCPOrigDataSent *float64 - TCPHystartTrainDetect *float64 - TCPHystartTrainCwnd *float64 - TCPHystartDelayDetect *float64 - TCPHystartDelayCwnd *float64 - TCPACKSkippedSynRecv *float64 - TCPACKSkippedPAWS *float64 - TCPACKSkippedSeq *float64 - TCPACKSkippedFinWait2 *float64 - TCPACKSkippedTimeWait *float64 - TCPACKSkippedChallenge *float64 - TCPWinProbe *float64 - TCPKeepAlive *float64 - TCPMTUPFail *float64 - TCPMTUPSuccess *float64 - TCPWqueueTooBig *float64 -} - -type IpExt struct { // nolint:revive - InNoRoutes *float64 - InTruncatedPkts *float64 - InMcastPkts *float64 - OutMcastPkts *float64 - InBcastPkts *float64 - OutBcastPkts *float64 - InOctets *float64 - OutOctets *float64 - InMcastOctets *float64 - OutMcastOctets *float64 - InBcastOctets *float64 - OutBcastOctets *float64 - InCsumErrors *float64 - InNoECTPkts *float64 - InECT1Pkts *float64 - InECT0Pkts 
*float64 - InCEPkts *float64 - ReasmOverlaps *float64 -} - -func (p Proc) Netstat() (ProcNetstat, error) { - filename := p.path("net/netstat") - data, err := util.ReadFileNoStat(filename) - if err != nil { - return ProcNetstat{PID: p.PID}, err - } - procNetstat, err := parseProcNetstat(bytes.NewReader(data), filename) - procNetstat.PID = p.PID - return procNetstat, err -} - -// parseProcNetstat parses the metrics from proc//net/netstat file -// and returns a ProcNetstat structure. -func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) { - var ( - scanner = bufio.NewScanner(r) - procNetstat = ProcNetstat{} - ) - - for scanner.Scan() { - nameParts := strings.Split(scanner.Text(), " ") - scanner.Scan() - valueParts := strings.Split(scanner.Text(), " ") - // Remove trailing :. - protocol := strings.TrimSuffix(nameParts[0], ":") - if len(nameParts) != len(valueParts) { - return procNetstat, fmt.Errorf("mismatch field count mismatch in %s: %s", - fileName, protocol) - } - for i := 1; i < len(nameParts); i++ { - value, err := strconv.ParseFloat(valueParts[i], 64) - if err != nil { - return procNetstat, err - } - key := nameParts[i] - - switch protocol { - case "TcpExt": - switch key { - case "SyncookiesSent": - procNetstat.TcpExt.SyncookiesSent = &value - case "SyncookiesRecv": - procNetstat.TcpExt.SyncookiesRecv = &value - case "SyncookiesFailed": - procNetstat.TcpExt.SyncookiesFailed = &value - case "EmbryonicRsts": - procNetstat.TcpExt.EmbryonicRsts = &value - case "PruneCalled": - procNetstat.TcpExt.PruneCalled = &value - case "RcvPruned": - procNetstat.TcpExt.RcvPruned = &value - case "OfoPruned": - procNetstat.TcpExt.OfoPruned = &value - case "OutOfWindowIcmps": - procNetstat.TcpExt.OutOfWindowIcmps = &value - case "LockDroppedIcmps": - procNetstat.TcpExt.LockDroppedIcmps = &value - case "ArpFilter": - procNetstat.TcpExt.ArpFilter = &value - case "TW": - procNetstat.TcpExt.TW = &value - case "TWRecycled": - procNetstat.TcpExt.TWRecycled = &value - case "TWKilled": - procNetstat.TcpExt.TWKilled = &value - case "PAWSActive": - procNetstat.TcpExt.PAWSActive = &value - case "PAWSEstab": - procNetstat.TcpExt.PAWSEstab = &value - case "DelayedACKs": - procNetstat.TcpExt.DelayedACKs = &value - case "DelayedACKLocked": - procNetstat.TcpExt.DelayedACKLocked = &value - case "DelayedACKLost": - procNetstat.TcpExt.DelayedACKLost = &value - case "ListenOverflows": - procNetstat.TcpExt.ListenOverflows = &value - case "ListenDrops": - procNetstat.TcpExt.ListenDrops = &value - case "TCPHPHits": - procNetstat.TcpExt.TCPHPHits = &value - case "TCPPureAcks": - procNetstat.TcpExt.TCPPureAcks = &value - case "TCPHPAcks": - procNetstat.TcpExt.TCPHPAcks = &value - case "TCPRenoRecovery": - procNetstat.TcpExt.TCPRenoRecovery = &value - case "TCPSackRecovery": - procNetstat.TcpExt.TCPSackRecovery = &value - case "TCPSACKReneging": - procNetstat.TcpExt.TCPSACKReneging = &value - case "TCPSACKReorder": - procNetstat.TcpExt.TCPSACKReorder = &value - case "TCPRenoReorder": - procNetstat.TcpExt.TCPRenoReorder = &value - case "TCPTSReorder": - procNetstat.TcpExt.TCPTSReorder = &value - case "TCPFullUndo": - procNetstat.TcpExt.TCPFullUndo = &value - case "TCPPartialUndo": - procNetstat.TcpExt.TCPPartialUndo = &value - case "TCPDSACKUndo": - procNetstat.TcpExt.TCPDSACKUndo = &value - case "TCPLossUndo": - procNetstat.TcpExt.TCPLossUndo = &value - case "TCPLostRetransmit": - procNetstat.TcpExt.TCPLostRetransmit = &value - case "TCPRenoFailures": - procNetstat.TcpExt.TCPRenoFailures = &value - case 
"TCPSackFailures": - procNetstat.TcpExt.TCPSackFailures = &value - case "TCPLossFailures": - procNetstat.TcpExt.TCPLossFailures = &value - case "TCPFastRetrans": - procNetstat.TcpExt.TCPFastRetrans = &value - case "TCPSlowStartRetrans": - procNetstat.TcpExt.TCPSlowStartRetrans = &value - case "TCPTimeouts": - procNetstat.TcpExt.TCPTimeouts = &value - case "TCPLossProbes": - procNetstat.TcpExt.TCPLossProbes = &value - case "TCPLossProbeRecovery": - procNetstat.TcpExt.TCPLossProbeRecovery = &value - case "TCPRenoRecoveryFail": - procNetstat.TcpExt.TCPRenoRecoveryFail = &value - case "TCPSackRecoveryFail": - procNetstat.TcpExt.TCPSackRecoveryFail = &value - case "TCPRcvCollapsed": - procNetstat.TcpExt.TCPRcvCollapsed = &value - case "TCPDSACKOldSent": - procNetstat.TcpExt.TCPDSACKOldSent = &value - case "TCPDSACKOfoSent": - procNetstat.TcpExt.TCPDSACKOfoSent = &value - case "TCPDSACKRecv": - procNetstat.TcpExt.TCPDSACKRecv = &value - case "TCPDSACKOfoRecv": - procNetstat.TcpExt.TCPDSACKOfoRecv = &value - case "TCPAbortOnData": - procNetstat.TcpExt.TCPAbortOnData = &value - case "TCPAbortOnClose": - procNetstat.TcpExt.TCPAbortOnClose = &value - case "TCPDeferAcceptDrop": - procNetstat.TcpExt.TCPDeferAcceptDrop = &value - case "IPReversePathFilter": - procNetstat.TcpExt.IPReversePathFilter = &value - case "TCPTimeWaitOverflow": - procNetstat.TcpExt.TCPTimeWaitOverflow = &value - case "TCPReqQFullDoCookies": - procNetstat.TcpExt.TCPReqQFullDoCookies = &value - case "TCPReqQFullDrop": - procNetstat.TcpExt.TCPReqQFullDrop = &value - case "TCPRetransFail": - procNetstat.TcpExt.TCPRetransFail = &value - case "TCPRcvCoalesce": - procNetstat.TcpExt.TCPRcvCoalesce = &value - case "TCPRcvQDrop": - procNetstat.TcpExt.TCPRcvQDrop = &value - case "TCPOFOQueue": - procNetstat.TcpExt.TCPOFOQueue = &value - case "TCPOFODrop": - procNetstat.TcpExt.TCPOFODrop = &value - case "TCPOFOMerge": - procNetstat.TcpExt.TCPOFOMerge = &value - case "TCPChallengeACK": - procNetstat.TcpExt.TCPChallengeACK = &value - case "TCPSYNChallenge": - procNetstat.TcpExt.TCPSYNChallenge = &value - case "TCPFastOpenActive": - procNetstat.TcpExt.TCPFastOpenActive = &value - case "TCPFastOpenActiveFail": - procNetstat.TcpExt.TCPFastOpenActiveFail = &value - case "TCPFastOpenPassive": - procNetstat.TcpExt.TCPFastOpenPassive = &value - case "TCPFastOpenPassiveFail": - procNetstat.TcpExt.TCPFastOpenPassiveFail = &value - case "TCPFastOpenListenOverflow": - procNetstat.TcpExt.TCPFastOpenListenOverflow = &value - case "TCPFastOpenCookieReqd": - procNetstat.TcpExt.TCPFastOpenCookieReqd = &value - case "TCPFastOpenBlackhole": - procNetstat.TcpExt.TCPFastOpenBlackhole = &value - case "TCPSpuriousRtxHostQueues": - procNetstat.TcpExt.TCPSpuriousRtxHostQueues = &value - case "BusyPollRxPackets": - procNetstat.TcpExt.BusyPollRxPackets = &value - case "TCPAutoCorking": - procNetstat.TcpExt.TCPAutoCorking = &value - case "TCPFromZeroWindowAdv": - procNetstat.TcpExt.TCPFromZeroWindowAdv = &value - case "TCPToZeroWindowAdv": - procNetstat.TcpExt.TCPToZeroWindowAdv = &value - case "TCPWantZeroWindowAdv": - procNetstat.TcpExt.TCPWantZeroWindowAdv = &value - case "TCPSynRetrans": - procNetstat.TcpExt.TCPSynRetrans = &value - case "TCPOrigDataSent": - procNetstat.TcpExt.TCPOrigDataSent = &value - case "TCPHystartTrainDetect": - procNetstat.TcpExt.TCPHystartTrainDetect = &value - case "TCPHystartTrainCwnd": - procNetstat.TcpExt.TCPHystartTrainCwnd = &value - case "TCPHystartDelayDetect": - procNetstat.TcpExt.TCPHystartDelayDetect = &value - case 
"TCPHystartDelayCwnd": - procNetstat.TcpExt.TCPHystartDelayCwnd = &value - case "TCPACKSkippedSynRecv": - procNetstat.TcpExt.TCPACKSkippedSynRecv = &value - case "TCPACKSkippedPAWS": - procNetstat.TcpExt.TCPACKSkippedPAWS = &value - case "TCPACKSkippedSeq": - procNetstat.TcpExt.TCPACKSkippedSeq = &value - case "TCPACKSkippedFinWait2": - procNetstat.TcpExt.TCPACKSkippedFinWait2 = &value - case "TCPACKSkippedTimeWait": - procNetstat.TcpExt.TCPACKSkippedTimeWait = &value - case "TCPACKSkippedChallenge": - procNetstat.TcpExt.TCPACKSkippedChallenge = &value - case "TCPWinProbe": - procNetstat.TcpExt.TCPWinProbe = &value - case "TCPKeepAlive": - procNetstat.TcpExt.TCPKeepAlive = &value - case "TCPMTUPFail": - procNetstat.TcpExt.TCPMTUPFail = &value - case "TCPMTUPSuccess": - procNetstat.TcpExt.TCPMTUPSuccess = &value - case "TCPWqueueTooBig": - procNetstat.TcpExt.TCPWqueueTooBig = &value - } - case "IpExt": - switch key { - case "InNoRoutes": - procNetstat.IpExt.InNoRoutes = &value - case "InTruncatedPkts": - procNetstat.IpExt.InTruncatedPkts = &value - case "InMcastPkts": - procNetstat.IpExt.InMcastPkts = &value - case "OutMcastPkts": - procNetstat.IpExt.OutMcastPkts = &value - case "InBcastPkts": - procNetstat.IpExt.InBcastPkts = &value - case "OutBcastPkts": - procNetstat.IpExt.OutBcastPkts = &value - case "InOctets": - procNetstat.IpExt.InOctets = &value - case "OutOctets": - procNetstat.IpExt.OutOctets = &value - case "InMcastOctets": - procNetstat.IpExt.InMcastOctets = &value - case "OutMcastOctets": - procNetstat.IpExt.OutMcastOctets = &value - case "InBcastOctets": - procNetstat.IpExt.InBcastOctets = &value - case "OutBcastOctets": - procNetstat.IpExt.OutBcastOctets = &value - case "InCsumErrors": - procNetstat.IpExt.InCsumErrors = &value - case "InNoECTPkts": - procNetstat.IpExt.InNoECTPkts = &value - case "InECT1Pkts": - procNetstat.IpExt.InECT1Pkts = &value - case "InECT0Pkts": - procNetstat.IpExt.InECT0Pkts = &value - case "InCEPkts": - procNetstat.IpExt.InCEPkts = &value - case "ReasmOverlaps": - procNetstat.IpExt.ReasmOverlaps = &value - } - } - } - } - return procNetstat, scanner.Err() -} diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go deleted file mode 100644 index 391b4cbd1..000000000 --- a/vendor/github.com/prometheus/procfs/proc_ns.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "fmt" - "os" - "strconv" - "strings" -) - -// Namespace represents a single namespace of a process. -type Namespace struct { - Type string // Namespace type. - Inode uint32 // Inode number of the namespace. If two processes are in the same namespace their inodes will match. -} - -// Namespaces contains all of the namespaces that the process is contained in. -type Namespaces map[string]Namespace - -// Namespaces reads from /proc//ns/* to get the namespaces of which the -// process is a member. 
-func (p Proc) Namespaces() (Namespaces, error) { - d, err := os.Open(p.path("ns")) - if err != nil { - return nil, err - } - defer d.Close() - - names, err := d.Readdirnames(-1) - if err != nil { - return nil, fmt.Errorf("failed to read contents of ns dir: %w", err) - } - - ns := make(Namespaces, len(names)) - for _, name := range names { - target, err := os.Readlink(p.path("ns", name)) - if err != nil { - return nil, err - } - - fields := strings.SplitN(target, ":", 2) - if len(fields) != 2 { - return nil, fmt.Errorf("failed to parse namespace type and inode from %q", target) - } - - typ := fields[0] - inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32) - if err != nil { - return nil, fmt.Errorf("failed to parse inode from %q: %w", fields[1], err) - } - - ns[name] = Namespace{typ, uint32(inode)} - } - - return ns, nil -} diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go deleted file mode 100644 index a68fe1529..000000000 --- a/vendor/github.com/prometheus/procfs/proc_psi.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -// The PSI / pressure interface is described at -// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/accounting/psi.txt -// Each resource (cpu, io, memory, ...) is exposed as a single file. -// Each file may contain up to two lines, one for "some" pressure and one for "full" pressure. -// Each line contains several averages (over n seconds) and a total in µs. -// -// Example io pressure file: -// > some avg10=0.06 avg60=0.21 avg300=0.99 total=8537362 -// > full avg10=0.00 avg60=0.13 avg300=0.96 total=8183134 - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -const lineFormat = "avg10=%f avg60=%f avg300=%f total=%d" - -// PSILine is a single line of values as returned by `/proc/pressure/*`. -// -// The Avg entries are averages over n seconds, as a percentage. -// The Total line is in microseconds. -type PSILine struct { - Avg10 float64 - Avg60 float64 - Avg300 float64 - Total uint64 -} - -// PSIStats represent pressure stall information from /proc/pressure/* -// -// "Some" indicates the share of time in which at least some tasks are stalled. -// "Full" indicates the share of time in which all non-idle tasks are stalled simultaneously. -type PSIStats struct { - Some *PSILine - Full *PSILine -} - -// PSIStatsForResource reads pressure stall information for the specified -// resource from /proc/pressure/. At time of writing this can be -// either "cpu", "memory" or "io". 
-func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) { - data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource))) - if err != nil { - return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %q: %w", resource, err) - } - - return parsePSIStats(resource, bytes.NewReader(data)) -} - -// parsePSIStats parses the specified file for pressure stall information. -func parsePSIStats(resource string, r io.Reader) (PSIStats, error) { - psiStats := PSIStats{} - - scanner := bufio.NewScanner(r) - for scanner.Scan() { - l := scanner.Text() - prefix := strings.Split(l, " ")[0] - switch prefix { - case "some": - psi := PSILine{} - _, err := fmt.Sscanf(l, fmt.Sprintf("some %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total) - if err != nil { - return PSIStats{}, err - } - psiStats.Some = &psi - case "full": - psi := PSILine{} - _, err := fmt.Sscanf(l, fmt.Sprintf("full %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total) - if err != nil { - return PSIStats{}, err - } - psiStats.Full = &psi - default: - // If we encounter a line with an unknown prefix, ignore it and move on - // Should new measurement types be added in the future we'll simply ignore them instead - // of erroring on retrieval - continue - } - } - - return psiStats, nil -} diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go deleted file mode 100644 index 0e97d9957..000000000 --- a/vendor/github.com/prometheus/procfs/proc_smaps.go +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !windows -// +build !windows - -package procfs - -import ( - "bufio" - "errors" - "fmt" - "os" - "regexp" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -var ( - // match the header line before each mapped zone in `/proc/pid/smaps`. - procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`) -) - -type ProcSMapsRollup struct { - // Amount of the mapping that is currently resident in RAM. - Rss uint64 - // Process's proportional share of this mapping. - Pss uint64 - // Size in bytes of clean shared pages. - SharedClean uint64 - // Size in bytes of dirty shared pages. - SharedDirty uint64 - // Size in bytes of clean private pages. - PrivateClean uint64 - // Size in bytes of dirty private pages. - PrivateDirty uint64 - // Amount of memory currently marked as referenced or accessed. - Referenced uint64 - // Amount of memory that does not belong to any file. - Anonymous uint64 - // Amount would-be-anonymous memory currently on swap. - Swap uint64 - // Process's proportional memory on swap. - SwapPss uint64 -} - -// ProcSMapsRollup reads from /proc/[pid]/smaps_rollup to get summed memory information of the -// process. -// -// If smaps_rollup does not exists (require kernel >= 4.15), the content of /proc/pid/smaps will -// we read and summed. 
-func (p Proc) ProcSMapsRollup() (ProcSMapsRollup, error) { - data, err := util.ReadFileNoStat(p.path("smaps_rollup")) - if err != nil && os.IsNotExist(err) { - return p.procSMapsRollupManual() - } - if err != nil { - return ProcSMapsRollup{}, err - } - - lines := strings.Split(string(data), "\n") - smaps := ProcSMapsRollup{} - - // skip first line which don't contains information we need - lines = lines[1:] - for _, line := range lines { - if line == "" { - continue - } - - if err := smaps.parseLine(line); err != nil { - return ProcSMapsRollup{}, err - } - } - - return smaps, nil -} - -// Read /proc/pid/smaps and do the roll-up in Go code. -func (p Proc) procSMapsRollupManual() (ProcSMapsRollup, error) { - file, err := os.Open(p.path("smaps")) - if err != nil { - return ProcSMapsRollup{}, err - } - defer file.Close() - - smaps := ProcSMapsRollup{} - scan := bufio.NewScanner(file) - - for scan.Scan() { - line := scan.Text() - - if procSMapsHeaderLine.MatchString(line) { - continue - } - - if err := smaps.parseLine(line); err != nil { - return ProcSMapsRollup{}, err - } - } - - return smaps, nil -} - -func (s *ProcSMapsRollup) parseLine(line string) error { - kv := strings.SplitN(line, ":", 2) - if len(kv) != 2 { - fmt.Println(line) - return errors.New("invalid net/dev line, missing colon") - } - - k := kv[0] - if k == "VmFlags" { - return nil - } - - v := strings.TrimSpace(kv[1]) - v = strings.TrimRight(v, " kB") - - vKBytes, err := strconv.ParseUint(v, 10, 64) - if err != nil { - return err - } - vBytes := vKBytes * 1024 - - s.addValue(k, v, vKBytes, vBytes) - - return nil -} - -func (s *ProcSMapsRollup) addValue(k string, vString string, vUint uint64, vUintBytes uint64) { - switch k { - case "Rss": - s.Rss += vUintBytes - case "Pss": - s.Pss += vUintBytes - case "Shared_Clean": - s.SharedClean += vUintBytes - case "Shared_Dirty": - s.SharedDirty += vUintBytes - case "Private_Clean": - s.PrivateClean += vUintBytes - case "Private_Dirty": - s.PrivateDirty += vUintBytes - case "Referenced": - s.Referenced += vUintBytes - case "Anonymous": - s.Anonymous += vUintBytes - case "Swap": - s.Swap += vUintBytes - case "SwapPss": - s.SwapPss += vUintBytes - } -} diff --git a/vendor/github.com/prometheus/procfs/proc_snmp.go b/vendor/github.com/prometheus/procfs/proc_snmp.go deleted file mode 100644 index 6c46b7188..000000000 --- a/vendor/github.com/prometheus/procfs/proc_snmp.go +++ /dev/null @@ -1,353 +0,0 @@ -// Copyright 2022 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// ProcSnmp models the content of /proc//net/snmp. -type ProcSnmp struct { - // The process ID. 
- PID int - Ip - Icmp - IcmpMsg - Tcp - Udp - UdpLite -} - -type Ip struct { // nolint:revive - Forwarding *float64 - DefaultTTL *float64 - InReceives *float64 - InHdrErrors *float64 - InAddrErrors *float64 - ForwDatagrams *float64 - InUnknownProtos *float64 - InDiscards *float64 - InDelivers *float64 - OutRequests *float64 - OutDiscards *float64 - OutNoRoutes *float64 - ReasmTimeout *float64 - ReasmReqds *float64 - ReasmOKs *float64 - ReasmFails *float64 - FragOKs *float64 - FragFails *float64 - FragCreates *float64 -} - -type Icmp struct { // nolint:revive - InMsgs *float64 - InErrors *float64 - InCsumErrors *float64 - InDestUnreachs *float64 - InTimeExcds *float64 - InParmProbs *float64 - InSrcQuenchs *float64 - InRedirects *float64 - InEchos *float64 - InEchoReps *float64 - InTimestamps *float64 - InTimestampReps *float64 - InAddrMasks *float64 - InAddrMaskReps *float64 - OutMsgs *float64 - OutErrors *float64 - OutDestUnreachs *float64 - OutTimeExcds *float64 - OutParmProbs *float64 - OutSrcQuenchs *float64 - OutRedirects *float64 - OutEchos *float64 - OutEchoReps *float64 - OutTimestamps *float64 - OutTimestampReps *float64 - OutAddrMasks *float64 - OutAddrMaskReps *float64 -} - -type IcmpMsg struct { - InType3 *float64 - OutType3 *float64 -} - -type Tcp struct { // nolint:revive - RtoAlgorithm *float64 - RtoMin *float64 - RtoMax *float64 - MaxConn *float64 - ActiveOpens *float64 - PassiveOpens *float64 - AttemptFails *float64 - EstabResets *float64 - CurrEstab *float64 - InSegs *float64 - OutSegs *float64 - RetransSegs *float64 - InErrs *float64 - OutRsts *float64 - InCsumErrors *float64 -} - -type Udp struct { // nolint:revive - InDatagrams *float64 - NoPorts *float64 - InErrors *float64 - OutDatagrams *float64 - RcvbufErrors *float64 - SndbufErrors *float64 - InCsumErrors *float64 - IgnoredMulti *float64 -} - -type UdpLite struct { // nolint:revive - InDatagrams *float64 - NoPorts *float64 - InErrors *float64 - OutDatagrams *float64 - RcvbufErrors *float64 - SndbufErrors *float64 - InCsumErrors *float64 - IgnoredMulti *float64 -} - -func (p Proc) Snmp() (ProcSnmp, error) { - filename := p.path("net/snmp") - data, err := util.ReadFileNoStat(filename) - if err != nil { - return ProcSnmp{PID: p.PID}, err - } - procSnmp, err := parseSnmp(bytes.NewReader(data), filename) - procSnmp.PID = p.PID - return procSnmp, err -} - -// parseSnmp parses the metrics from proc//net/snmp file -// and returns a map contains those metrics (e.g. {"Ip": {"Forwarding": 2}}). -func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) { - var ( - scanner = bufio.NewScanner(r) - procSnmp = ProcSnmp{} - ) - - for scanner.Scan() { - nameParts := strings.Split(scanner.Text(), " ") - scanner.Scan() - valueParts := strings.Split(scanner.Text(), " ") - // Remove trailing :. 
- protocol := strings.TrimSuffix(nameParts[0], ":") - if len(nameParts) != len(valueParts) { - return procSnmp, fmt.Errorf("mismatch field count mismatch in %s: %s", - fileName, protocol) - } - for i := 1; i < len(nameParts); i++ { - value, err := strconv.ParseFloat(valueParts[i], 64) - if err != nil { - return procSnmp, err - } - key := nameParts[i] - - switch protocol { - case "Ip": - switch key { - case "Forwarding": - procSnmp.Ip.Forwarding = &value - case "DefaultTTL": - procSnmp.Ip.DefaultTTL = &value - case "InReceives": - procSnmp.Ip.InReceives = &value - case "InHdrErrors": - procSnmp.Ip.InHdrErrors = &value - case "InAddrErrors": - procSnmp.Ip.InAddrErrors = &value - case "ForwDatagrams": - procSnmp.Ip.ForwDatagrams = &value - case "InUnknownProtos": - procSnmp.Ip.InUnknownProtos = &value - case "InDiscards": - procSnmp.Ip.InDiscards = &value - case "InDelivers": - procSnmp.Ip.InDelivers = &value - case "OutRequests": - procSnmp.Ip.OutRequests = &value - case "OutDiscards": - procSnmp.Ip.OutDiscards = &value - case "OutNoRoutes": - procSnmp.Ip.OutNoRoutes = &value - case "ReasmTimeout": - procSnmp.Ip.ReasmTimeout = &value - case "ReasmReqds": - procSnmp.Ip.ReasmReqds = &value - case "ReasmOKs": - procSnmp.Ip.ReasmOKs = &value - case "ReasmFails": - procSnmp.Ip.ReasmFails = &value - case "FragOKs": - procSnmp.Ip.FragOKs = &value - case "FragFails": - procSnmp.Ip.FragFails = &value - case "FragCreates": - procSnmp.Ip.FragCreates = &value - } - case "Icmp": - switch key { - case "InMsgs": - procSnmp.Icmp.InMsgs = &value - case "InErrors": - procSnmp.Icmp.InErrors = &value - case "InCsumErrors": - procSnmp.Icmp.InCsumErrors = &value - case "InDestUnreachs": - procSnmp.Icmp.InDestUnreachs = &value - case "InTimeExcds": - procSnmp.Icmp.InTimeExcds = &value - case "InParmProbs": - procSnmp.Icmp.InParmProbs = &value - case "InSrcQuenchs": - procSnmp.Icmp.InSrcQuenchs = &value - case "InRedirects": - procSnmp.Icmp.InRedirects = &value - case "InEchos": - procSnmp.Icmp.InEchos = &value - case "InEchoReps": - procSnmp.Icmp.InEchoReps = &value - case "InTimestamps": - procSnmp.Icmp.InTimestamps = &value - case "InTimestampReps": - procSnmp.Icmp.InTimestampReps = &value - case "InAddrMasks": - procSnmp.Icmp.InAddrMasks = &value - case "InAddrMaskReps": - procSnmp.Icmp.InAddrMaskReps = &value - case "OutMsgs": - procSnmp.Icmp.OutMsgs = &value - case "OutErrors": - procSnmp.Icmp.OutErrors = &value - case "OutDestUnreachs": - procSnmp.Icmp.OutDestUnreachs = &value - case "OutTimeExcds": - procSnmp.Icmp.OutTimeExcds = &value - case "OutParmProbs": - procSnmp.Icmp.OutParmProbs = &value - case "OutSrcQuenchs": - procSnmp.Icmp.OutSrcQuenchs = &value - case "OutRedirects": - procSnmp.Icmp.OutRedirects = &value - case "OutEchos": - procSnmp.Icmp.OutEchos = &value - case "OutEchoReps": - procSnmp.Icmp.OutEchoReps = &value - case "OutTimestamps": - procSnmp.Icmp.OutTimestamps = &value - case "OutTimestampReps": - procSnmp.Icmp.OutTimestampReps = &value - case "OutAddrMasks": - procSnmp.Icmp.OutAddrMasks = &value - case "OutAddrMaskReps": - procSnmp.Icmp.OutAddrMaskReps = &value - } - case "IcmpMsg": - switch key { - case "InType3": - procSnmp.IcmpMsg.InType3 = &value - case "OutType3": - procSnmp.IcmpMsg.OutType3 = &value - } - case "Tcp": - switch key { - case "RtoAlgorithm": - procSnmp.Tcp.RtoAlgorithm = &value - case "RtoMin": - procSnmp.Tcp.RtoMin = &value - case "RtoMax": - procSnmp.Tcp.RtoMax = &value - case "MaxConn": - procSnmp.Tcp.MaxConn = &value - case "ActiveOpens": - 
procSnmp.Tcp.ActiveOpens = &value - case "PassiveOpens": - procSnmp.Tcp.PassiveOpens = &value - case "AttemptFails": - procSnmp.Tcp.AttemptFails = &value - case "EstabResets": - procSnmp.Tcp.EstabResets = &value - case "CurrEstab": - procSnmp.Tcp.CurrEstab = &value - case "InSegs": - procSnmp.Tcp.InSegs = &value - case "OutSegs": - procSnmp.Tcp.OutSegs = &value - case "RetransSegs": - procSnmp.Tcp.RetransSegs = &value - case "InErrs": - procSnmp.Tcp.InErrs = &value - case "OutRsts": - procSnmp.Tcp.OutRsts = &value - case "InCsumErrors": - procSnmp.Tcp.InCsumErrors = &value - } - case "Udp": - switch key { - case "InDatagrams": - procSnmp.Udp.InDatagrams = &value - case "NoPorts": - procSnmp.Udp.NoPorts = &value - case "InErrors": - procSnmp.Udp.InErrors = &value - case "OutDatagrams": - procSnmp.Udp.OutDatagrams = &value - case "RcvbufErrors": - procSnmp.Udp.RcvbufErrors = &value - case "SndbufErrors": - procSnmp.Udp.SndbufErrors = &value - case "InCsumErrors": - procSnmp.Udp.InCsumErrors = &value - case "IgnoredMulti": - procSnmp.Udp.IgnoredMulti = &value - } - case "UdpLite": - switch key { - case "InDatagrams": - procSnmp.UdpLite.InDatagrams = &value - case "NoPorts": - procSnmp.UdpLite.NoPorts = &value - case "InErrors": - procSnmp.UdpLite.InErrors = &value - case "OutDatagrams": - procSnmp.UdpLite.OutDatagrams = &value - case "RcvbufErrors": - procSnmp.UdpLite.RcvbufErrors = &value - case "SndbufErrors": - procSnmp.UdpLite.SndbufErrors = &value - case "InCsumErrors": - procSnmp.UdpLite.InCsumErrors = &value - case "IgnoredMulti": - procSnmp.UdpLite.IgnoredMulti = &value - } - } - } - } - return procSnmp, scanner.Err() -} diff --git a/vendor/github.com/prometheus/procfs/proc_snmp6.go b/vendor/github.com/prometheus/procfs/proc_snmp6.go deleted file mode 100644 index 3059cc6a1..000000000 --- a/vendor/github.com/prometheus/procfs/proc_snmp6.go +++ /dev/null @@ -1,381 +0,0 @@ -// Copyright 2022 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "errors" - "io" - "os" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// ProcSnmp6 models the content of /proc//net/snmp6. -type ProcSnmp6 struct { - // The process ID. 
- PID int - Ip6 - Icmp6 - Udp6 - UdpLite6 -} - -type Ip6 struct { // nolint:revive - InReceives *float64 - InHdrErrors *float64 - InTooBigErrors *float64 - InNoRoutes *float64 - InAddrErrors *float64 - InUnknownProtos *float64 - InTruncatedPkts *float64 - InDiscards *float64 - InDelivers *float64 - OutForwDatagrams *float64 - OutRequests *float64 - OutDiscards *float64 - OutNoRoutes *float64 - ReasmTimeout *float64 - ReasmReqds *float64 - ReasmOKs *float64 - ReasmFails *float64 - FragOKs *float64 - FragFails *float64 - FragCreates *float64 - InMcastPkts *float64 - OutMcastPkts *float64 - InOctets *float64 - OutOctets *float64 - InMcastOctets *float64 - OutMcastOctets *float64 - InBcastOctets *float64 - OutBcastOctets *float64 - InNoECTPkts *float64 - InECT1Pkts *float64 - InECT0Pkts *float64 - InCEPkts *float64 -} - -type Icmp6 struct { - InMsgs *float64 - InErrors *float64 - OutMsgs *float64 - OutErrors *float64 - InCsumErrors *float64 - InDestUnreachs *float64 - InPktTooBigs *float64 - InTimeExcds *float64 - InParmProblems *float64 - InEchos *float64 - InEchoReplies *float64 - InGroupMembQueries *float64 - InGroupMembResponses *float64 - InGroupMembReductions *float64 - InRouterSolicits *float64 - InRouterAdvertisements *float64 - InNeighborSolicits *float64 - InNeighborAdvertisements *float64 - InRedirects *float64 - InMLDv2Reports *float64 - OutDestUnreachs *float64 - OutPktTooBigs *float64 - OutTimeExcds *float64 - OutParmProblems *float64 - OutEchos *float64 - OutEchoReplies *float64 - OutGroupMembQueries *float64 - OutGroupMembResponses *float64 - OutGroupMembReductions *float64 - OutRouterSolicits *float64 - OutRouterAdvertisements *float64 - OutNeighborSolicits *float64 - OutNeighborAdvertisements *float64 - OutRedirects *float64 - OutMLDv2Reports *float64 - InType1 *float64 - InType134 *float64 - InType135 *float64 - InType136 *float64 - InType143 *float64 - OutType133 *float64 - OutType135 *float64 - OutType136 *float64 - OutType143 *float64 -} - -type Udp6 struct { // nolint:revive - InDatagrams *float64 - NoPorts *float64 - InErrors *float64 - OutDatagrams *float64 - RcvbufErrors *float64 - SndbufErrors *float64 - InCsumErrors *float64 - IgnoredMulti *float64 -} - -type UdpLite6 struct { // nolint:revive - InDatagrams *float64 - NoPorts *float64 - InErrors *float64 - OutDatagrams *float64 - RcvbufErrors *float64 - SndbufErrors *float64 - InCsumErrors *float64 -} - -func (p Proc) Snmp6() (ProcSnmp6, error) { - filename := p.path("net/snmp6") - data, err := util.ReadFileNoStat(filename) - if err != nil { - // On systems with IPv6 disabled, this file won't exist. - // Do nothing. - if errors.Is(err, os.ErrNotExist) { - return ProcSnmp6{PID: p.PID}, nil - } - - return ProcSnmp6{PID: p.PID}, err - } - - procSnmp6, err := parseSNMP6Stats(bytes.NewReader(data)) - procSnmp6.PID = p.PID - return procSnmp6, err -} - -// parseSnmp6 parses the metrics from proc//net/snmp6 file -// and returns a map contains those metrics. 
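For reviewers who have not worked with this vendored API: the ProcSnmp/ProcSnmp6 types removed in this hunk expose each counter as a *float64 that stays nil when the kernel does not report it. Below is a minimal usage sketch; procfs.Self comes from proc.go, which is not touched by this diff, so treat that helper as an assumption.

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// Self is assumed to be the usual helper from proc.go (not shown in this hunk).
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}

	// Snmp6 reads /proc/<pid>/net/snmp6 and returns an empty struct when
	// IPv6 is disabled on the host.
	snmp6, err := p.Snmp6()
	if err != nil {
		log.Fatal(err)
	}

	// Counters are pointers so that "absent" and "zero" can be told apart.
	if v := snmp6.Ip6.InReceives; v != nil {
		fmt.Printf("pid %d received %.0f IPv6 datagrams\n", snmp6.PID, *v)
	}
}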
-func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) { - var ( - scanner = bufio.NewScanner(r) - procSnmp6 = ProcSnmp6{} - ) - - for scanner.Scan() { - stat := strings.Fields(scanner.Text()) - if len(stat) < 2 { - continue - } - // Expect to have "6" in metric name, skip line otherwise - if sixIndex := strings.Index(stat[0], "6"); sixIndex != -1 { - protocol := stat[0][:sixIndex+1] - key := stat[0][sixIndex+1:] - value, err := strconv.ParseFloat(stat[1], 64) - if err != nil { - return procSnmp6, err - } - - switch protocol { - case "Ip6": - switch key { - case "InReceives": - procSnmp6.Ip6.InReceives = &value - case "InHdrErrors": - procSnmp6.Ip6.InHdrErrors = &value - case "InTooBigErrors": - procSnmp6.Ip6.InTooBigErrors = &value - case "InNoRoutes": - procSnmp6.Ip6.InNoRoutes = &value - case "InAddrErrors": - procSnmp6.Ip6.InAddrErrors = &value - case "InUnknownProtos": - procSnmp6.Ip6.InUnknownProtos = &value - case "InTruncatedPkts": - procSnmp6.Ip6.InTruncatedPkts = &value - case "InDiscards": - procSnmp6.Ip6.InDiscards = &value - case "InDelivers": - procSnmp6.Ip6.InDelivers = &value - case "OutForwDatagrams": - procSnmp6.Ip6.OutForwDatagrams = &value - case "OutRequests": - procSnmp6.Ip6.OutRequests = &value - case "OutDiscards": - procSnmp6.Ip6.OutDiscards = &value - case "OutNoRoutes": - procSnmp6.Ip6.OutNoRoutes = &value - case "ReasmTimeout": - procSnmp6.Ip6.ReasmTimeout = &value - case "ReasmReqds": - procSnmp6.Ip6.ReasmReqds = &value - case "ReasmOKs": - procSnmp6.Ip6.ReasmOKs = &value - case "ReasmFails": - procSnmp6.Ip6.ReasmFails = &value - case "FragOKs": - procSnmp6.Ip6.FragOKs = &value - case "FragFails": - procSnmp6.Ip6.FragFails = &value - case "FragCreates": - procSnmp6.Ip6.FragCreates = &value - case "InMcastPkts": - procSnmp6.Ip6.InMcastPkts = &value - case "OutMcastPkts": - procSnmp6.Ip6.OutMcastPkts = &value - case "InOctets": - procSnmp6.Ip6.InOctets = &value - case "OutOctets": - procSnmp6.Ip6.OutOctets = &value - case "InMcastOctets": - procSnmp6.Ip6.InMcastOctets = &value - case "OutMcastOctets": - procSnmp6.Ip6.OutMcastOctets = &value - case "InBcastOctets": - procSnmp6.Ip6.InBcastOctets = &value - case "OutBcastOctets": - procSnmp6.Ip6.OutBcastOctets = &value - case "InNoECTPkts": - procSnmp6.Ip6.InNoECTPkts = &value - case "InECT1Pkts": - procSnmp6.Ip6.InECT1Pkts = &value - case "InECT0Pkts": - procSnmp6.Ip6.InECT0Pkts = &value - case "InCEPkts": - procSnmp6.Ip6.InCEPkts = &value - - } - case "Icmp6": - switch key { - case "InMsgs": - procSnmp6.Icmp6.InMsgs = &value - case "InErrors": - procSnmp6.Icmp6.InErrors = &value - case "OutMsgs": - procSnmp6.Icmp6.OutMsgs = &value - case "OutErrors": - procSnmp6.Icmp6.OutErrors = &value - case "InCsumErrors": - procSnmp6.Icmp6.InCsumErrors = &value - case "InDestUnreachs": - procSnmp6.Icmp6.InDestUnreachs = &value - case "InPktTooBigs": - procSnmp6.Icmp6.InPktTooBigs = &value - case "InTimeExcds": - procSnmp6.Icmp6.InTimeExcds = &value - case "InParmProblems": - procSnmp6.Icmp6.InParmProblems = &value - case "InEchos": - procSnmp6.Icmp6.InEchos = &value - case "InEchoReplies": - procSnmp6.Icmp6.InEchoReplies = &value - case "InGroupMembQueries": - procSnmp6.Icmp6.InGroupMembQueries = &value - case "InGroupMembResponses": - procSnmp6.Icmp6.InGroupMembResponses = &value - case "InGroupMembReductions": - procSnmp6.Icmp6.InGroupMembReductions = &value - case "InRouterSolicits": - procSnmp6.Icmp6.InRouterSolicits = &value - case "InRouterAdvertisements": - procSnmp6.Icmp6.InRouterAdvertisements = &value - case 
"InNeighborSolicits": - procSnmp6.Icmp6.InNeighborSolicits = &value - case "InNeighborAdvertisements": - procSnmp6.Icmp6.InNeighborAdvertisements = &value - case "InRedirects": - procSnmp6.Icmp6.InRedirects = &value - case "InMLDv2Reports": - procSnmp6.Icmp6.InMLDv2Reports = &value - case "OutDestUnreachs": - procSnmp6.Icmp6.OutDestUnreachs = &value - case "OutPktTooBigs": - procSnmp6.Icmp6.OutPktTooBigs = &value - case "OutTimeExcds": - procSnmp6.Icmp6.OutTimeExcds = &value - case "OutParmProblems": - procSnmp6.Icmp6.OutParmProblems = &value - case "OutEchos": - procSnmp6.Icmp6.OutEchos = &value - case "OutEchoReplies": - procSnmp6.Icmp6.OutEchoReplies = &value - case "OutGroupMembQueries": - procSnmp6.Icmp6.OutGroupMembQueries = &value - case "OutGroupMembResponses": - procSnmp6.Icmp6.OutGroupMembResponses = &value - case "OutGroupMembReductions": - procSnmp6.Icmp6.OutGroupMembReductions = &value - case "OutRouterSolicits": - procSnmp6.Icmp6.OutRouterSolicits = &value - case "OutRouterAdvertisements": - procSnmp6.Icmp6.OutRouterAdvertisements = &value - case "OutNeighborSolicits": - procSnmp6.Icmp6.OutNeighborSolicits = &value - case "OutNeighborAdvertisements": - procSnmp6.Icmp6.OutNeighborAdvertisements = &value - case "OutRedirects": - procSnmp6.Icmp6.OutRedirects = &value - case "OutMLDv2Reports": - procSnmp6.Icmp6.OutMLDv2Reports = &value - case "InType1": - procSnmp6.Icmp6.InType1 = &value - case "InType134": - procSnmp6.Icmp6.InType134 = &value - case "InType135": - procSnmp6.Icmp6.InType135 = &value - case "InType136": - procSnmp6.Icmp6.InType136 = &value - case "InType143": - procSnmp6.Icmp6.InType143 = &value - case "OutType133": - procSnmp6.Icmp6.OutType133 = &value - case "OutType135": - procSnmp6.Icmp6.OutType135 = &value - case "OutType136": - procSnmp6.Icmp6.OutType136 = &value - case "OutType143": - procSnmp6.Icmp6.OutType143 = &value - } - case "Udp6": - switch key { - case "InDatagrams": - procSnmp6.Udp6.InDatagrams = &value - case "NoPorts": - procSnmp6.Udp6.NoPorts = &value - case "InErrors": - procSnmp6.Udp6.InErrors = &value - case "OutDatagrams": - procSnmp6.Udp6.OutDatagrams = &value - case "RcvbufErrors": - procSnmp6.Udp6.RcvbufErrors = &value - case "SndbufErrors": - procSnmp6.Udp6.SndbufErrors = &value - case "InCsumErrors": - procSnmp6.Udp6.InCsumErrors = &value - case "IgnoredMulti": - procSnmp6.Udp6.IgnoredMulti = &value - } - case "UdpLite6": - switch key { - case "InDatagrams": - procSnmp6.UdpLite6.InDatagrams = &value - case "NoPorts": - procSnmp6.UdpLite6.NoPorts = &value - case "InErrors": - procSnmp6.UdpLite6.InErrors = &value - case "OutDatagrams": - procSnmp6.UdpLite6.OutDatagrams = &value - case "RcvbufErrors": - procSnmp6.UdpLite6.RcvbufErrors = &value - case "SndbufErrors": - procSnmp6.UdpLite6.SndbufErrors = &value - case "InCsumErrors": - procSnmp6.UdpLite6.InCsumErrors = &value - } - } - } - } - return procSnmp6, scanner.Err() -} diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go deleted file mode 100644 index b278eb2c2..000000000 --- a/vendor/github.com/prometheus/procfs/proc_stat.go +++ /dev/null @@ -1,224 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bytes" - "fmt" - "os" - - "github.com/prometheus/procfs/internal/fs" - "github.com/prometheus/procfs/internal/util" -) - -// Originally, this USER_HZ value was dynamically retrieved via a sysconf call -// which required cgo. However, that caused a lot of problems regarding -// cross-compilation. Alternatives such as running a binary to determine the -// value, or trying to derive it in some other way were all problematic. After -// much research it was determined that USER_HZ is actually hardcoded to 100 on -// all Go-supported platforms as of the time of this writing. This is why we -// decided to hardcode it here as well. It is not impossible that there could -// be systems with exceptions, but they should be very exotic edge cases, and -// in that case, the worst outcome will be two misreported metrics. -// -// See also the following discussions: -// -// - https://github.com/prometheus/node_exporter/issues/52 -// - https://github.com/prometheus/procfs/pull/2 -// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue -const userHZ = 100 - -// ProcStat provides status information about the process, -// read from /proc/[pid]/stat. -type ProcStat struct { - // The process ID. - PID int - // The filename of the executable. - Comm string - // The process state. - State string - // The PID of the parent of this process. - PPID int - // The process group ID of the process. - PGRP int - // The session ID of the process. - Session int - // The controlling terminal of the process. - TTY int - // The ID of the foreground process group of the controlling terminal of - // the process. - TPGID int - // The kernel flags word of the process. - Flags uint - // The number of minor faults the process has made which have not required - // loading a memory page from disk. - MinFlt uint - // The number of minor faults that the process's waited-for children have - // made. - CMinFlt uint - // The number of major faults the process has made which have required - // loading a memory page from disk. - MajFlt uint - // The number of major faults that the process's waited-for children have - // made. - CMajFlt uint - // Amount of time that this process has been scheduled in user mode, - // measured in clock ticks. - UTime uint - // Amount of time that this process has been scheduled in kernel mode, - // measured in clock ticks. - STime uint - // Amount of time that this process's waited-for children have been - // scheduled in user mode, measured in clock ticks. - CUTime int - // Amount of time that this process's waited-for children have been - // scheduled in kernel mode, measured in clock ticks. - CSTime int - // For processes running a real-time scheduling policy, this is the negated - // scheduling priority, minus one. - Priority int - // The nice value, a value in the range 19 (low priority) to -20 (high - // priority). - Nice int - // Number of threads in this process. - NumThreads int - // The time the process started after system boot, the value is expressed - // in clock ticks. 
- Starttime uint64 - // Virtual memory size in bytes. - VSize uint - // Resident set size in pages. - RSS int - // Soft limit in bytes on the rss of the process. - RSSLimit uint64 - // CPU number last executed on. - Processor uint - // Real-time scheduling priority, a number in the range 1 to 99 for processes - // scheduled under a real-time policy, or 0, for non-real-time processes. - RTPriority uint - // Scheduling policy. - Policy uint - // Aggregated block I/O delays, measured in clock ticks (centiseconds). - DelayAcctBlkIOTicks uint64 - - proc fs.FS -} - -// NewStat returns the current status information of the process. -// -// Deprecated: Use p.Stat() instead. -func (p Proc) NewStat() (ProcStat, error) { - return p.Stat() -} - -// Stat returns the current status information of the process. -func (p Proc) Stat() (ProcStat, error) { - data, err := util.ReadFileNoStat(p.path("stat")) - if err != nil { - return ProcStat{}, err - } - - var ( - ignoreInt64 int64 - ignoreUint64 uint64 - - s = ProcStat{PID: p.PID, proc: p.fs} - l = bytes.Index(data, []byte("(")) - r = bytes.LastIndex(data, []byte(")")) - ) - - if l < 0 || r < 0 { - return ProcStat{}, fmt.Errorf("unexpected format, couldn't extract comm %q", data) - } - - s.Comm = string(data[l+1 : r]) - - // Check the following resources for the details about the particular stat - // fields and their data types: - // * https://man7.org/linux/man-pages/man5/proc.5.html - // * https://man7.org/linux/man-pages/man3/scanf.3.html - _, err = fmt.Fscan( - bytes.NewBuffer(data[r+2:]), - &s.State, - &s.PPID, - &s.PGRP, - &s.Session, - &s.TTY, - &s.TPGID, - &s.Flags, - &s.MinFlt, - &s.CMinFlt, - &s.MajFlt, - &s.CMajFlt, - &s.UTime, - &s.STime, - &s.CUTime, - &s.CSTime, - &s.Priority, - &s.Nice, - &s.NumThreads, - &ignoreInt64, - &s.Starttime, - &s.VSize, - &s.RSS, - &s.RSSLimit, - &ignoreUint64, - &ignoreUint64, - &ignoreUint64, - &ignoreUint64, - &ignoreUint64, - &ignoreUint64, - &ignoreUint64, - &ignoreUint64, - &ignoreUint64, - &ignoreUint64, - &ignoreUint64, - &ignoreUint64, - &ignoreInt64, - &s.Processor, - &s.RTPriority, - &s.Policy, - &s.DelayAcctBlkIOTicks, - ) - if err != nil { - return ProcStat{}, err - } - - return s, nil -} - -// VirtualMemory returns the virtual memory size in bytes. -func (s ProcStat) VirtualMemory() uint { - return s.VSize -} - -// ResidentMemory returns the resident memory size in bytes. -func (s ProcStat) ResidentMemory() int { - return s.RSS * os.Getpagesize() -} - -// StartTime returns the unix timestamp of the process in seconds. -func (s ProcStat) StartTime() (float64, error) { - fs := FS{proc: s.proc} - stat, err := fs.Stat() - if err != nil { - return 0, err - } - return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil -} - -// CPUTime returns the total CPU user and system time in seconds. -func (s ProcStat) CPUTime() float64 { - return float64(s.UTime+s.STime) / userHZ -} diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go deleted file mode 100644 index 3d8c06439..000000000 --- a/vendor/github.com/prometheus/procfs/proc_status.go +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
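proc_stat.go, removed just above, hides the USER_HZ and page-size conversions behind small helper methods. A usage sketch under the same assumption about procfs.Self:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self() // assumed helper from proc.go, not in this hunk
	if err != nil {
		log.Fatal(err)
	}

	stat, err := p.Stat() // parses /proc/<pid>/stat
	if err != nil {
		log.Fatal(err)
	}

	// StartTime combines the boot time with starttime/USER_HZ into a unix timestamp.
	started, err := stat.StartTime()
	if err != nil {
		log.Fatal(err)
	}

	fmt.Printf("comm=%s state=%s rss=%d bytes cpu=%.2fs started=%.0f\n",
		stat.Comm, stat.State, stat.ResidentMemory(), stat.CPUTime(), started)
}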
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bytes" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// ProcStatus provides status information about the process, -// read from /proc/[pid]/stat. -type ProcStatus struct { - // The process ID. - PID int - // The process name. - Name string - - // Thread group ID. - TGID int - - // Peak virtual memory size. - VmPeak uint64 // nolint:revive - // Virtual memory size. - VmSize uint64 // nolint:revive - // Locked memory size. - VmLck uint64 // nolint:revive - // Pinned memory size. - VmPin uint64 // nolint:revive - // Peak resident set size. - VmHWM uint64 // nolint:revive - // Resident set size (sum of RssAnnon RssFile and RssShmem). - VmRSS uint64 // nolint:revive - // Size of resident anonymous memory. - RssAnon uint64 // nolint:revive - // Size of resident file mappings. - RssFile uint64 // nolint:revive - // Size of resident shared memory. - RssShmem uint64 // nolint:revive - // Size of data segments. - VmData uint64 // nolint:revive - // Size of stack segments. - VmStk uint64 // nolint:revive - // Size of text segments. - VmExe uint64 // nolint:revive - // Shared library code size. - VmLib uint64 // nolint:revive - // Page table entries size. - VmPTE uint64 // nolint:revive - // Size of second-level page tables. - VmPMD uint64 // nolint:revive - // Swapped-out virtual memory size by anonymous private. - VmSwap uint64 // nolint:revive - // Size of hugetlb memory portions - HugetlbPages uint64 - - // Number of voluntary context switches. - VoluntaryCtxtSwitches uint64 - // Number of involuntary context switches. - NonVoluntaryCtxtSwitches uint64 - - // UIDs of the process (Real, effective, saved set, and filesystem UIDs) - UIDs [4]string - // GIDs of the process (Real, effective, saved set, and filesystem GIDs) - GIDs [4]string -} - -// NewStatus returns the current status information of the process. 
-func (p Proc) NewStatus() (ProcStatus, error) { - data, err := util.ReadFileNoStat(p.path("status")) - if err != nil { - return ProcStatus{}, err - } - - s := ProcStatus{PID: p.PID} - - lines := strings.Split(string(data), "\n") - for _, line := range lines { - if !bytes.Contains([]byte(line), []byte(":")) { - continue - } - - kv := strings.SplitN(line, ":", 2) - - // removes spaces - k := strings.TrimSpace(kv[0]) - v := strings.TrimSpace(kv[1]) - // removes "kB" - v = strings.TrimSuffix(v, " kB") - - // value to int when possible - // we can skip error check here, 'cause vKBytes is not used when value is a string - vKBytes, _ := strconv.ParseUint(v, 10, 64) - // convert kB to B - vBytes := vKBytes * 1024 - - s.fillStatus(k, v, vKBytes, vBytes) - } - - return s, nil -} - -func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) { - switch k { - case "Tgid": - s.TGID = int(vUint) - case "Name": - s.Name = vString - case "Uid": - copy(s.UIDs[:], strings.Split(vString, "\t")) - case "Gid": - copy(s.GIDs[:], strings.Split(vString, "\t")) - case "VmPeak": - s.VmPeak = vUintBytes - case "VmSize": - s.VmSize = vUintBytes - case "VmLck": - s.VmLck = vUintBytes - case "VmPin": - s.VmPin = vUintBytes - case "VmHWM": - s.VmHWM = vUintBytes - case "VmRSS": - s.VmRSS = vUintBytes - case "RssAnon": - s.RssAnon = vUintBytes - case "RssFile": - s.RssFile = vUintBytes - case "RssShmem": - s.RssShmem = vUintBytes - case "VmData": - s.VmData = vUintBytes - case "VmStk": - s.VmStk = vUintBytes - case "VmExe": - s.VmExe = vUintBytes - case "VmLib": - s.VmLib = vUintBytes - case "VmPTE": - s.VmPTE = vUintBytes - case "VmPMD": - s.VmPMD = vUintBytes - case "VmSwap": - s.VmSwap = vUintBytes - case "HugetlbPages": - s.HugetlbPages = vUintBytes - case "voluntary_ctxt_switches": - s.VoluntaryCtxtSwitches = vUint - case "nonvoluntary_ctxt_switches": - s.NonVoluntaryCtxtSwitches = vUint - } -} - -// TotalCtxtSwitches returns the total context switch. -func (s ProcStatus) TotalCtxtSwitches() uint64 { - return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches -} diff --git a/vendor/github.com/prometheus/procfs/proc_sys.go b/vendor/github.com/prometheus/procfs/proc_sys.go deleted file mode 100644 index d46533ebf..000000000 --- a/vendor/github.com/prometheus/procfs/proc_sys.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2022 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
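proc_status.go, removed just above, parses the human-readable /proc/[pid]/status file and converts the kB-suffixed fields to bytes in fillStatus. A sketch, with the same caveat about procfs.Self:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self() // assumed helper, defined outside this hunk
	if err != nil {
		log.Fatal(err)
	}

	status, err := p.NewStatus() // parses /proc/<pid>/status
	if err != nil {
		log.Fatal(err)
	}

	// VmRSS and the other Vm* fields are already converted from kB to bytes.
	fmt.Printf("%s: rss=%d bytes, ctxt switches=%d (voluntary=%d)\n",
		status.Name, status.VmRSS, status.TotalCtxtSwitches(), status.VoluntaryCtxtSwitches)
}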
- -package procfs - -import ( - "fmt" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -func sysctlToPath(sysctl string) string { - return strings.Replace(sysctl, ".", "/", -1) -} - -func (fs FS) SysctlStrings(sysctl string) ([]string, error) { - value, err := util.SysReadFile(fs.proc.Path("sys", sysctlToPath(sysctl))) - if err != nil { - return nil, err - } - return strings.Fields(value), nil - -} - -func (fs FS) SysctlInts(sysctl string) ([]int, error) { - fields, err := fs.SysctlStrings(sysctl) - if err != nil { - return nil, err - } - - values := make([]int, len(fields)) - for i, f := range fields { - vp := util.NewValueParser(f) - values[i] = vp.Int() - if err := vp.Err(); err != nil { - return nil, fmt.Errorf("field %d in sysctl %s is not a valid int: %w", i, sysctl, err) - } - } - return values, nil -} diff --git a/vendor/github.com/prometheus/procfs/schedstat.go b/vendor/github.com/prometheus/procfs/schedstat.go deleted file mode 100644 index 5f7f32dc8..000000000 --- a/vendor/github.com/prometheus/procfs/schedstat.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "errors" - "os" - "regexp" - "strconv" -) - -var ( - cpuLineRE = regexp.MustCompile(`cpu(\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+)`) - procLineRE = regexp.MustCompile(`(\d+) (\d+) (\d+)`) -) - -// Schedstat contains scheduler statistics from /proc/schedstat -// -// See -// https://www.kernel.org/doc/Documentation/scheduler/sched-stats.txt -// for a detailed description of what these numbers mean. -// -// Note the current kernel documentation claims some of the time units are in -// jiffies when they are actually in nanoseconds since 2.6.23 with the -// introduction of CFS. A fix to the documentation is pending. See -// https://lore.kernel.org/patchwork/project/lkml/list/?series=403473 -type Schedstat struct { - CPUs []*SchedstatCPU -} - -// SchedstatCPU contains the values from one "cpu" line. -type SchedstatCPU struct { - CPUNum string - - RunningNanoseconds uint64 - WaitingNanoseconds uint64 - RunTimeslices uint64 -} - -// ProcSchedstat contains the values from `/proc//schedstat`. -type ProcSchedstat struct { - RunningNanoseconds uint64 - WaitingNanoseconds uint64 - RunTimeslices uint64 -} - -// Schedstat reads data from `/proc/schedstat`. 
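proc_sys.go, removed just above, is a thin wrapper over /proc/sys that maps dotted sysctl names to paths. A sketch; the sysctl name here is only an example:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint) // usually /proc
	if err != nil {
		log.Fatal(err)
	}

	// Dots map to path separators, so this reads /proc/sys/net/ipv4/ip_forward.
	vals, err := fs.SysctlInts("net.ipv4.ip_forward")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("net.ipv4.ip_forward =", vals)
}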
-func (fs FS) Schedstat() (*Schedstat, error) { - file, err := os.Open(fs.proc.Path("schedstat")) - if err != nil { - return nil, err - } - defer file.Close() - - stats := &Schedstat{} - scanner := bufio.NewScanner(file) - - for scanner.Scan() { - match := cpuLineRE.FindStringSubmatch(scanner.Text()) - if match != nil { - cpu := &SchedstatCPU{} - cpu.CPUNum = match[1] - - cpu.RunningNanoseconds, err = strconv.ParseUint(match[8], 10, 64) - if err != nil { - continue - } - - cpu.WaitingNanoseconds, err = strconv.ParseUint(match[9], 10, 64) - if err != nil { - continue - } - - cpu.RunTimeslices, err = strconv.ParseUint(match[10], 10, 64) - if err != nil { - continue - } - - stats.CPUs = append(stats.CPUs, cpu) - } - } - - return stats, nil -} - -func parseProcSchedstat(contents string) (ProcSchedstat, error) { - var ( - stats ProcSchedstat - err error - ) - match := procLineRE.FindStringSubmatch(contents) - - if match != nil { - stats.RunningNanoseconds, err = strconv.ParseUint(match[1], 10, 64) - if err != nil { - return stats, err - } - - stats.WaitingNanoseconds, err = strconv.ParseUint(match[2], 10, 64) - if err != nil { - return stats, err - } - - stats.RunTimeslices, err = strconv.ParseUint(match[3], 10, 64) - return stats, err - } - - return stats, errors.New("could not parse schedstat") -} diff --git a/vendor/github.com/prometheus/procfs/slab.go b/vendor/github.com/prometheus/procfs/slab.go deleted file mode 100644 index bc9aaf5c2..000000000 --- a/vendor/github.com/prometheus/procfs/slab.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "regexp" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -var ( - slabSpace = regexp.MustCompile(`\s+`) - slabVer = regexp.MustCompile(`slabinfo -`) - slabHeader = regexp.MustCompile(`# name`) -) - -// Slab represents a slab pool in the kernel. -type Slab struct { - Name string - ObjActive int64 - ObjNum int64 - ObjSize int64 - ObjPerSlab int64 - PagesPerSlab int64 - // tunables - Limit int64 - Batch int64 - SharedFactor int64 - SlabActive int64 - SlabNum int64 - SharedAvail int64 -} - -// SlabInfo represents info for all slabs. -type SlabInfo struct { - Slabs []*Slab -} - -func shouldParseSlab(line string) bool { - if slabVer.MatchString(line) { - return false - } - if slabHeader.MatchString(line) { - return false - } - return true -} - -// parseV21SlabEntry is used to parse a line from /proc/slabinfo version 2.1. -func parseV21SlabEntry(line string) (*Slab, error) { - // First cleanup whitespace. 
- l := slabSpace.ReplaceAllString(line, " ") - s := strings.Split(l, " ") - if len(s) != 16 { - return nil, fmt.Errorf("unable to parse: %q", line) - } - var err error - i := &Slab{Name: s[0]} - i.ObjActive, err = strconv.ParseInt(s[1], 10, 64) - if err != nil { - return nil, err - } - i.ObjNum, err = strconv.ParseInt(s[2], 10, 64) - if err != nil { - return nil, err - } - i.ObjSize, err = strconv.ParseInt(s[3], 10, 64) - if err != nil { - return nil, err - } - i.ObjPerSlab, err = strconv.ParseInt(s[4], 10, 64) - if err != nil { - return nil, err - } - i.PagesPerSlab, err = strconv.ParseInt(s[5], 10, 64) - if err != nil { - return nil, err - } - i.Limit, err = strconv.ParseInt(s[8], 10, 64) - if err != nil { - return nil, err - } - i.Batch, err = strconv.ParseInt(s[9], 10, 64) - if err != nil { - return nil, err - } - i.SharedFactor, err = strconv.ParseInt(s[10], 10, 64) - if err != nil { - return nil, err - } - i.SlabActive, err = strconv.ParseInt(s[13], 10, 64) - if err != nil { - return nil, err - } - i.SlabNum, err = strconv.ParseInt(s[14], 10, 64) - if err != nil { - return nil, err - } - i.SharedAvail, err = strconv.ParseInt(s[15], 10, 64) - if err != nil { - return nil, err - } - return i, nil -} - -// parseSlabInfo21 is used to parse a slabinfo 2.1 file. -func parseSlabInfo21(r *bytes.Reader) (SlabInfo, error) { - scanner := bufio.NewScanner(r) - s := SlabInfo{Slabs: []*Slab{}} - for scanner.Scan() { - line := scanner.Text() - if !shouldParseSlab(line) { - continue - } - slab, err := parseV21SlabEntry(line) - if err != nil { - return s, err - } - s.Slabs = append(s.Slabs, slab) - } - return s, nil -} - -// SlabInfo reads data from `/proc/slabinfo`. -func (fs FS) SlabInfo() (SlabInfo, error) { - // TODO: Consider passing options to allow for parsing different - // slabinfo versions. However, slabinfo 2.1 has been stable since - // kernel 2.6.10 and later. - data, err := util.ReadFileNoStat(fs.proc.Path("slabinfo")) - if err != nil { - return SlabInfo{}, err - } - - return parseSlabInfo21(bytes.NewReader(data)) -} diff --git a/vendor/github.com/prometheus/procfs/softirqs.go b/vendor/github.com/prometheus/procfs/softirqs.go deleted file mode 100644 index 559129cbc..000000000 --- a/vendor/github.com/prometheus/procfs/softirqs.go +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright 2022 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// Softirqs represents the softirq statistics. 
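slab.go, removed just above, only understands the slabinfo 2.1 format and, on most systems, needs elevated privileges because /proc/slabinfo is not world-readable. A sketch:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}

	info, err := fs.SlabInfo() // requires permission to read /proc/slabinfo
	if err != nil {
		log.Fatal(err)
	}

	for _, s := range info.Slabs {
		fmt.Printf("%-30s active=%d total=%d objsize=%d\n",
			s.Name, s.ObjActive, s.ObjNum, s.ObjSize)
	}
}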
-type Softirqs struct { - Hi []uint64 - Timer []uint64 - NetTx []uint64 - NetRx []uint64 - Block []uint64 - IRQPoll []uint64 - Tasklet []uint64 - Sched []uint64 - HRTimer []uint64 - RCU []uint64 -} - -func (fs FS) Softirqs() (Softirqs, error) { - fileName := fs.proc.Path("softirqs") - data, err := util.ReadFileNoStat(fileName) - if err != nil { - return Softirqs{}, err - } - - reader := bytes.NewReader(data) - - return parseSoftirqs(reader) -} - -func parseSoftirqs(r io.Reader) (Softirqs, error) { - var ( - softirqs = Softirqs{} - scanner = bufio.NewScanner(r) - ) - - if !scanner.Scan() { - return Softirqs{}, fmt.Errorf("softirqs empty") - } - - for scanner.Scan() { - parts := strings.Fields(scanner.Text()) - var err error - - // require at least one cpu - if len(parts) < 2 { - continue - } - switch { - case parts[0] == "HI:": - perCPU := parts[1:] - softirqs.Hi = make([]uint64, len(perCPU)) - for i, count := range perCPU { - if softirqs.Hi[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (HI%d): %w", count, i, err) - } - } - case parts[0] == "TIMER:": - perCPU := parts[1:] - softirqs.Timer = make([]uint64, len(perCPU)) - for i, count := range perCPU { - if softirqs.Timer[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (TIMER%d): %w", count, i, err) - } - } - case parts[0] == "NET_TX:": - perCPU := parts[1:] - softirqs.NetTx = make([]uint64, len(perCPU)) - for i, count := range perCPU { - if softirqs.NetTx[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (NET_TX%d): %w", count, i, err) - } - } - case parts[0] == "NET_RX:": - perCPU := parts[1:] - softirqs.NetRx = make([]uint64, len(perCPU)) - for i, count := range perCPU { - if softirqs.NetRx[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (NET_RX%d): %w", count, i, err) - } - } - case parts[0] == "BLOCK:": - perCPU := parts[1:] - softirqs.Block = make([]uint64, len(perCPU)) - for i, count := range perCPU { - if softirqs.Block[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (BLOCK%d): %w", count, i, err) - } - } - case parts[0] == "IRQ_POLL:": - perCPU := parts[1:] - softirqs.IRQPoll = make([]uint64, len(perCPU)) - for i, count := range perCPU { - if softirqs.IRQPoll[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (IRQ_POLL%d): %w", count, i, err) - } - } - case parts[0] == "TASKLET:": - perCPU := parts[1:] - softirqs.Tasklet = make([]uint64, len(perCPU)) - for i, count := range perCPU { - if softirqs.Tasklet[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (TASKLET%d): %w", count, i, err) - } - } - case parts[0] == "SCHED:": - perCPU := parts[1:] - softirqs.Sched = make([]uint64, len(perCPU)) - for i, count := range perCPU { - if softirqs.Sched[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (SCHED%d): %w", count, i, err) - } - } - case parts[0] == "HRTIMER:": - perCPU := parts[1:] - softirqs.HRTimer = make([]uint64, len(perCPU)) - for i, count := range perCPU { - if softirqs.HRTimer[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (HRTIMER%d): %w", count, i, err) - } - } - case parts[0] == "RCU:": - perCPU := 
parts[1:] - softirqs.RCU = make([]uint64, len(perCPU)) - for i, count := range perCPU { - if softirqs.RCU[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse %q (RCU%d): %w", count, i, err) - } - } - } - } - - if err := scanner.Err(); err != nil { - return Softirqs{}, fmt.Errorf("couldn't parse softirqs: %w", err) - } - - return softirqs, scanner.Err() -} diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go deleted file mode 100644 index 586af48af..000000000 --- a/vendor/github.com/prometheus/procfs/stat.go +++ /dev/null @@ -1,254 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/fs" - "github.com/prometheus/procfs/internal/util" -) - -// CPUStat shows how much time the cpu spend in various stages. -type CPUStat struct { - User float64 - Nice float64 - System float64 - Idle float64 - Iowait float64 - IRQ float64 - SoftIRQ float64 - Steal float64 - Guest float64 - GuestNice float64 -} - -// SoftIRQStat represent the softirq statistics as exported in the procfs stat file. -// A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html -// It is possible to get per-cpu stats by reading `/proc/softirqs`. -type SoftIRQStat struct { - Hi uint64 - Timer uint64 - NetTx uint64 - NetRx uint64 - Block uint64 - BlockIoPoll uint64 - Tasklet uint64 - Sched uint64 - Hrtimer uint64 - Rcu uint64 -} - -// Stat represents kernel/system statistics. -type Stat struct { - // Boot time in seconds since the Epoch. - BootTime uint64 - // Summed up cpu statistics. - CPUTotal CPUStat - // Per-CPU statistics. - CPU map[int64]CPUStat - // Number of times interrupts were handled, which contains numbered and unnumbered IRQs. - IRQTotal uint64 - // Number of times a numbered IRQ was triggered. - IRQ []uint64 - // Number of times a context switch happened. - ContextSwitches uint64 - // Number of times a process was created. - ProcessCreated uint64 - // Number of processes currently running. - ProcessesRunning uint64 - // Number of processes currently blocked (waiting for IO). - ProcessesBlocked uint64 - // Number of times a softirq was scheduled. - SoftIRQTotal uint64 - // Detailed softirq statistics. - SoftIRQ SoftIRQStat -} - -// Parse a cpu statistics line and returns the CPUStat struct plus the cpu id (or -1 for the overall sum). 
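softirqs.go, removed just above, returns one slice element per CPU for every softirq class. A sketch that sums the per-CPU NET_RX counts:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}

	irqs, err := fs.Softirqs() // parses /proc/softirqs
	if err != nil {
		log.Fatal(err)
	}

	var netRx uint64
	for _, perCPU := range irqs.NetRx {
		netRx += perCPU
	}
	fmt.Printf("NET_RX softirqs across %d CPUs: %d\n", len(irqs.NetRx), netRx)
}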
-func parseCPUStat(line string) (CPUStat, int64, error) { - cpuStat := CPUStat{} - var cpu string - - count, err := fmt.Sscanf(line, "%s %f %f %f %f %f %f %f %f %f %f", - &cpu, - &cpuStat.User, &cpuStat.Nice, &cpuStat.System, &cpuStat.Idle, - &cpuStat.Iowait, &cpuStat.IRQ, &cpuStat.SoftIRQ, &cpuStat.Steal, - &cpuStat.Guest, &cpuStat.GuestNice) - - if err != nil && err != io.EOF { - return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): %w", line, err) - } - if count == 0 { - return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): 0 elements parsed", line) - } - - cpuStat.User /= userHZ - cpuStat.Nice /= userHZ - cpuStat.System /= userHZ - cpuStat.Idle /= userHZ - cpuStat.Iowait /= userHZ - cpuStat.IRQ /= userHZ - cpuStat.SoftIRQ /= userHZ - cpuStat.Steal /= userHZ - cpuStat.Guest /= userHZ - cpuStat.GuestNice /= userHZ - - if cpu == "cpu" { - return cpuStat, -1, nil - } - - cpuID, err := strconv.ParseInt(cpu[3:], 10, 64) - if err != nil { - return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu/cpuid): %w", line, err) - } - - return cpuStat, cpuID, nil -} - -// Parse a softirq line. -func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) { - softIRQStat := SoftIRQStat{} - var total uint64 - var prefix string - - _, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d", - &prefix, &total, - &softIRQStat.Hi, &softIRQStat.Timer, &softIRQStat.NetTx, &softIRQStat.NetRx, - &softIRQStat.Block, &softIRQStat.BlockIoPoll, - &softIRQStat.Tasklet, &softIRQStat.Sched, - &softIRQStat.Hrtimer, &softIRQStat.Rcu) - - if err != nil { - return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %q (softirq): %w", line, err) - } - - return softIRQStat, total, nil -} - -// NewStat returns information about current cpu/process statistics. -// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt -// -// Deprecated: Use fs.Stat() instead. -func NewStat() (Stat, error) { - fs, err := NewFS(fs.DefaultProcMountPoint) - if err != nil { - return Stat{}, err - } - return fs.Stat() -} - -// NewStat returns information about current cpu/process statistics. -// See: https://www.kernel.org/doc/Documentation/filesystems/proc.txt -// -// Deprecated: Use fs.Stat() instead. -func (fs FS) NewStat() (Stat, error) { - return fs.Stat() -} - -// Stat returns information about current cpu/process statistics. -// See: https://www.kernel.org/doc/Documentation/filesystems/proc.txt -func (fs FS) Stat() (Stat, error) { - fileName := fs.proc.Path("stat") - data, err := util.ReadFileNoStat(fileName) - if err != nil { - return Stat{}, err - } - procStat, err := parseStat(bytes.NewReader(data), fileName) - if err != nil { - return Stat{}, err - } - return procStat, nil -} - -// parseStat parses the metrics from /proc/[pid]/stat. 
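Despite the doc comment right above, parseStat handles the system-wide /proc/stat (FS.Stat reads fs.proc.Path("stat")), not a per-process file. A sketch of the removed accessor; the CPU times are already divided by USER_HZ, so they are seconds:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}

	stat, err := fs.Stat() // system-wide /proc/stat
	if err != nil {
		log.Fatal(err)
	}

	fmt.Printf("booted at %s, total user cpu %.0fs, %d context switches\n",
		time.Unix(int64(stat.BootTime), 0), stat.CPUTotal.User, stat.ContextSwitches)
}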
-func parseStat(r io.Reader, fileName string) (Stat, error) { - var ( - scanner = bufio.NewScanner(r) - stat = Stat{ - CPU: make(map[int64]CPUStat), - } - err error - ) - - for scanner.Scan() { - line := scanner.Text() - parts := strings.Fields(scanner.Text()) - // require at least - if len(parts) < 2 { - continue - } - switch { - case parts[0] == "btime": - if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (btime): %w", parts[1], err) - } - case parts[0] == "intr": - if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (intr): %w", parts[1], err) - } - numberedIRQs := parts[2:] - stat.IRQ = make([]uint64, len(numberedIRQs)) - for i, count := range numberedIRQs { - if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (intr%d): %w", count, i, err) - } - } - case parts[0] == "ctxt": - if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (ctxt): %w", parts[1], err) - } - case parts[0] == "processes": - if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (processes): %w", parts[1], err) - } - case parts[0] == "procs_running": - if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (procs_running): %w", parts[1], err) - } - case parts[0] == "procs_blocked": - if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q (procs_blocked): %w", parts[1], err) - } - case parts[0] == "softirq": - softIRQStats, total, err := parseSoftIRQStat(line) - if err != nil { - return Stat{}, err - } - stat.SoftIRQTotal = total - stat.SoftIRQ = softIRQStats - case strings.HasPrefix(parts[0], "cpu"): - cpuStat, cpuID, err := parseCPUStat(line) - if err != nil { - return Stat{}, err - } - if cpuID == -1 { - stat.CPUTotal = cpuStat - } else { - stat.CPU[cpuID] = cpuStat - } - } - } - - if err := scanner.Err(); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %q: %w", fileName, err) - } - - return stat, nil -} diff --git a/vendor/github.com/prometheus/procfs/swaps.go b/vendor/github.com/prometheus/procfs/swaps.go deleted file mode 100644 index 15edc2212..000000000 --- a/vendor/github.com/prometheus/procfs/swaps.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// Swap represents an entry in /proc/swaps. -type Swap struct { - Filename string - Type string - Size int - Used int - Priority int -} - -// Swaps returns a slice of all configured swap devices on the system. 
-func (fs FS) Swaps() ([]*Swap, error) { - data, err := util.ReadFileNoStat(fs.proc.Path("swaps")) - if err != nil { - return nil, err - } - return parseSwaps(data) -} - -func parseSwaps(info []byte) ([]*Swap, error) { - swaps := []*Swap{} - scanner := bufio.NewScanner(bytes.NewReader(info)) - scanner.Scan() // ignore header line - for scanner.Scan() { - swapString := scanner.Text() - parsedSwap, err := parseSwapString(swapString) - if err != nil { - return nil, err - } - swaps = append(swaps, parsedSwap) - } - - err := scanner.Err() - return swaps, err -} - -func parseSwapString(swapString string) (*Swap, error) { - var err error - - swapFields := strings.Fields(swapString) - swapLength := len(swapFields) - if swapLength < 5 { - return nil, fmt.Errorf("too few fields in swap string: %s", swapString) - } - - swap := &Swap{ - Filename: swapFields[0], - Type: swapFields[1], - } - - swap.Size, err = strconv.Atoi(swapFields[2]) - if err != nil { - return nil, fmt.Errorf("invalid swap size: %s", swapFields[2]) - } - swap.Used, err = strconv.Atoi(swapFields[3]) - if err != nil { - return nil, fmt.Errorf("invalid swap used: %s", swapFields[3]) - } - swap.Priority, err = strconv.Atoi(swapFields[4]) - if err != nil { - return nil, fmt.Errorf("invalid swap priority: %s", swapFields[4]) - } - - return swap, nil -} diff --git a/vendor/github.com/prometheus/procfs/thread.go b/vendor/github.com/prometheus/procfs/thread.go deleted file mode 100644 index f08bfc769..000000000 --- a/vendor/github.com/prometheus/procfs/thread.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2022 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "fmt" - "os" - "strconv" - - fsi "github.com/prometheus/procfs/internal/fs" -) - -// Provide access to /proc/PID/task/TID files, for thread specific values. Since -// such files have the same structure as /proc/PID/ ones, the data structures -// and the parsers for the latter may be reused. - -// AllThreads returns a list of all currently available threads under /proc/PID. -func AllThreads(pid int) (Procs, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return Procs{}, err - } - return fs.AllThreads(pid) -} - -// AllThreads returns a list of all currently available threads for PID. -func (fs FS) AllThreads(pid int) (Procs, error) { - taskPath := fs.proc.Path(strconv.Itoa(pid), "task") - d, err := os.Open(taskPath) - if err != nil { - return Procs{}, err - } - defer d.Close() - - names, err := d.Readdirnames(-1) - if err != nil { - return Procs{}, fmt.Errorf("could not read %q: %w", d.Name(), err) - } - - t := Procs{} - for _, n := range names { - tid, err := strconv.ParseInt(n, 10, 64) - if err != nil { - continue - } - t = append(t, Proc{PID: int(tid), fs: fsi.FS(taskPath)}) - } - - return t, nil -} - -// Thread returns a process for a given PID, TID. 
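thread.go, removed just above, reuses the Proc type and its parsers for /proc/PID/task/TID entries. A sketch that lists the threads of the current process:

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/prometheus/procfs"
)

func main() {
	// AllThreads enumerates /proc/<pid>/task and returns one Proc per TID.
	threads, err := procfs.AllThreads(os.Getpid())
	if err != nil {
		log.Fatal(err)
	}

	for _, t := range threads {
		// The same stat parser applies, scoped to the thread directory.
		stat, err := t.Stat()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("tid=%d comm=%s state=%s\n", t.PID, stat.Comm, stat.State)
	}
}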
-func (fs FS) Thread(pid, tid int) (Proc, error) { - taskPath := fs.proc.Path(strconv.Itoa(pid), "task") - if _, err := os.Stat(taskPath); err != nil { - return Proc{}, err - } - return Proc{PID: tid, fs: fsi.FS(taskPath)}, nil -} - -// Thread returns a process for a given TID of Proc. -func (proc Proc) Thread(tid int) (Proc, error) { - tfs := fsi.FS(proc.path("task")) - if _, err := os.Stat(tfs.Path(strconv.Itoa(tid))); err != nil { - return Proc{}, err - } - return Proc{PID: tid, fs: tfs}, nil -} diff --git a/vendor/github.com/prometheus/procfs/ttar b/vendor/github.com/prometheus/procfs/ttar deleted file mode 100644 index 19ef02b8d..000000000 --- a/vendor/github.com/prometheus/procfs/ttar +++ /dev/null @@ -1,413 +0,0 @@ -#!/usr/bin/env bash - -# Purpose: plain text tar format -# Limitations: - only suitable for text files, directories, and symlinks -# - stores only filename, content, and mode -# - not designed for untrusted input -# -# Note: must work with bash version 3.2 (macOS) - -# Copyright 2017 Roger Luethi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -o errexit -o nounset - -# Sanitize environment (for instance, standard sorting of glob matches) -export LC_ALL=C - -path="" -CMD="" -ARG_STRING="$*" - -#------------------------------------------------------------------------------ -# Not all sed implementations can work on null bytes. In order to make ttar -# work out of the box on macOS, use Python as a stream editor. - -USE_PYTHON=0 - -PYTHON_CREATE_FILTER=$(cat << 'PCF' -#!/usr/bin/env python - -import re -import sys - -for line in sys.stdin: - line = re.sub(r'EOF', r'\EOF', line) - line = re.sub(r'NULLBYTE', r'\NULLBYTE', line) - line = re.sub('\x00', r'NULLBYTE', line) - sys.stdout.write(line) -PCF -) - -PYTHON_EXTRACT_FILTER=$(cat << 'PEF' -#!/usr/bin/env python - -import re -import sys - -for line in sys.stdin: - line = re.sub(r'(?/dev/null; then - echo "ERROR Python not found. Aborting." - exit 2 - fi - USE_PYTHON=1 - fi -} - -#------------------------------------------------------------------------------ - -function usage { - bname=$(basename "$0") - cat << USAGE -Usage: $bname [-C
] -c -f (create archive) - $bname -t -f (list archive contents) - $bname [-C ] -x -f (extract archive) - -Options: - -C (change directory) - -v (verbose) - --recursive-unlink (recursively delete existing directory if path - collides with file or directory to extract) - -Example: Change to sysfs directory, create ttar file from fixtures directory - $bname -C sysfs -c -f sysfs/fixtures.ttar fixtures/ -USAGE -exit "$1" -} - -function vecho { - if [ "${VERBOSE:-}" == "yes" ]; then - echo >&7 "$@" - fi -} - -function set_cmd { - if [ -n "$CMD" ]; then - echo "ERROR: more than one command given" - echo - usage 2 - fi - CMD=$1 -} - -unset VERBOSE -unset RECURSIVE_UNLINK - -while getopts :cf:-:htxvC: opt; do - case $opt in - c) - set_cmd "create" - ;; - f) - ARCHIVE=$OPTARG - ;; - h) - usage 0 - ;; - t) - set_cmd "list" - ;; - x) - set_cmd "extract" - ;; - v) - VERBOSE=yes - exec 7>&1 - ;; - C) - CDIR=$OPTARG - ;; - -) - case $OPTARG in - recursive-unlink) - RECURSIVE_UNLINK="yes" - ;; - *) - echo -e "Error: invalid option -$OPTARG" - echo - usage 1 - ;; - esac - ;; - *) - echo >&2 "ERROR: invalid option -$OPTARG" - echo - usage 1 - ;; - esac -done - -# Remove processed options from arguments -shift $(( OPTIND - 1 )); - -if [ "${CMD:-}" == "" ]; then - echo >&2 "ERROR: no command given" - echo - usage 1 -elif [ "${ARCHIVE:-}" == "" ]; then - echo >&2 "ERROR: no archive name given" - echo - usage 1 -fi - -function list { - local path="" - local size=0 - local line_no=0 - local ttar_file=$1 - if [ -n "${2:-}" ]; then - echo >&2 "ERROR: too many arguments." - echo - usage 1 - fi - if [ ! -e "$ttar_file" ]; then - echo >&2 "ERROR: file not found ($ttar_file)" - echo - usage 1 - fi - while read -r line; do - line_no=$(( line_no + 1 )) - if [ $size -gt 0 ]; then - size=$(( size - 1 )) - continue - fi - if [[ $line =~ ^Path:\ (.*)$ ]]; then - path=${BASH_REMATCH[1]} - elif [[ $line =~ ^Lines:\ (.*)$ ]]; then - size=${BASH_REMATCH[1]} - echo "$path" - elif [[ $line =~ ^Directory:\ (.*)$ ]]; then - path=${BASH_REMATCH[1]} - echo "$path/" - elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then - echo "$path -> ${BASH_REMATCH[1]}" - fi - done < "$ttar_file" -} - -function extract { - local path="" - local size=0 - local line_no=0 - local ttar_file=$1 - if [ -n "${2:-}" ]; then - echo >&2 "ERROR: too many arguments." - echo - usage 1 - fi - if [ ! -e "$ttar_file" ]; then - echo >&2 "ERROR: file not found ($ttar_file)" - echo - usage 1 - fi - while IFS= read -r line; do - line_no=$(( line_no + 1 )) - local eof_without_newline - if [ "$size" -gt 0 ]; then - if [[ "$line" =~ [^\\]EOF ]]; then - # An EOF not preceded by a backslash indicates that the line - # does not end with a newline - eof_without_newline=1 - else - eof_without_newline=0 - fi - # Replace NULLBYTE with null byte if at beginning of line - # Replace NULLBYTE with null byte unless preceded by backslash - # Remove one backslash in front of NULLBYTE (if any) - # Remove EOF unless preceded by backslash - # Remove one backslash in front of EOF - if [ $USE_PYTHON -eq 1 ]; then - echo -n "$line" | python -c "$PYTHON_EXTRACT_FILTER" >> "$path" - else - # The repeated pattern makes up for sed's lack of negative - # lookbehind assertions (for consecutive null bytes). 
- echo -n "$line" | \ - sed -e 's/^NULLBYTE/\x0/g; - s/\([^\\]\)NULLBYTE/\1\x0/g; - s/\([^\\]\)NULLBYTE/\1\x0/g; - s/\\NULLBYTE/NULLBYTE/g; - s/\([^\\]\)EOF/\1/g; - s/\\EOF/EOF/g; - ' >> "$path" - fi - if [[ "$eof_without_newline" -eq 0 ]]; then - echo >> "$path" - fi - size=$(( size - 1 )) - continue - fi - if [[ $line =~ ^Path:\ (.*)$ ]]; then - path=${BASH_REMATCH[1]} - if [ -L "$path" ]; then - rm "$path" - elif [ -d "$path" ]; then - if [ "${RECURSIVE_UNLINK:-}" == "yes" ]; then - rm -r "$path" - else - # Safe because symlinks to directories are dealt with above - rmdir "$path" - fi - elif [ -e "$path" ]; then - rm "$path" - fi - elif [[ $line =~ ^Lines:\ (.*)$ ]]; then - size=${BASH_REMATCH[1]} - # Create file even if it is zero-length. - touch "$path" - vecho " $path" - elif [[ $line =~ ^Mode:\ (.*)$ ]]; then - mode=${BASH_REMATCH[1]} - chmod "$mode" "$path" - vecho "$mode" - elif [[ $line =~ ^Directory:\ (.*)$ ]]; then - path=${BASH_REMATCH[1]} - mkdir -p "$path" - vecho " $path/" - elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then - ln -s "${BASH_REMATCH[1]}" "$path" - vecho " $path -> ${BASH_REMATCH[1]}" - elif [[ $line =~ ^# ]]; then - # Ignore comments between files - continue - else - echo >&2 "ERROR: Unknown keyword on line $line_no: $line" - exit 1 - fi - done < "$ttar_file" -} - -function div { - echo "# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -" \ - "- - - - - -" -} - -function get_mode { - local mfile=$1 - if [ -z "${STAT_OPTION:-}" ]; then - if stat -c '%a' "$mfile" >/dev/null 2>&1; then - # GNU stat - STAT_OPTION='-c' - STAT_FORMAT='%a' - else - # BSD stat - STAT_OPTION='-f' - # Octal output, user/group/other (omit file type, sticky bit) - STAT_FORMAT='%OLp' - fi - fi - stat "${STAT_OPTION}" "${STAT_FORMAT}" "$mfile" -} - -function _create { - shopt -s nullglob - local mode - local eof_without_newline - while (( "$#" )); do - file=$1 - if [ -L "$file" ]; then - echo "Path: $file" - symlinkTo=$(readlink "$file") - echo "SymlinkTo: $symlinkTo" - vecho " $file -> $symlinkTo" - div - elif [ -d "$file" ]; then - # Strip trailing slash (if there is one) - file=${file%/} - echo "Directory: $file" - mode=$(get_mode "$file") - echo "Mode: $mode" - vecho "$mode $file/" - div - # Find all files and dirs, including hidden/dot files - for x in "$file/"{*,.[^.]*}; do - _create "$x" - done - elif [ -f "$file" ]; then - echo "Path: $file" - lines=$(wc -l "$file"|awk '{print $1}') - eof_without_newline=0 - if [[ "$(wc -c "$file"|awk '{print $1}')" -gt 0 ]] && \ - [[ "$(tail -c 1 "$file" | wc -l)" -eq 0 ]]; then - eof_without_newline=1 - lines=$((lines+1)) - fi - echo "Lines: $lines" - # Add backslash in front of EOF - # Add backslash in front of NULLBYTE - # Replace null byte with NULLBYTE - if [ $USE_PYTHON -eq 1 ]; then - < "$file" python -c "$PYTHON_CREATE_FILTER" - else - < "$file" \ - sed 's/EOF/\\EOF/g; - s/NULLBYTE/\\NULLBYTE/g; - s/\x0/NULLBYTE/g; - ' - fi - if [[ "$eof_without_newline" -eq 1 ]]; then - # Finish line with EOF to indicate that the original line did - # not end with a linefeed - echo "EOF" - fi - mode=$(get_mode "$file") - echo "Mode: $mode" - vecho "$mode $file" - div - else - echo >&2 "ERROR: file not found ($file in $(pwd))" - exit 2 - fi - shift - done -} - -function create { - ttar_file=$1 - shift - if [ -z "${1:-}" ]; then - echo >&2 "ERROR: missing arguments." 
- echo - usage 1 - fi - if [ -e "$ttar_file" ]; then - rm "$ttar_file" - fi - exec > "$ttar_file" - echo "# Archive created by ttar $ARG_STRING" - _create "$@" -} - -test_environment - -if [ -n "${CDIR:-}" ]; then - if [[ "$ARCHIVE" != /* ]]; then - # Relative path: preserve the archive's location before changing - # directory - ARCHIVE="$(pwd)/$ARCHIVE" - fi - cd "$CDIR" -fi - -"$CMD" "$ARCHIVE" "$@" diff --git a/vendor/github.com/prometheus/procfs/vm.go b/vendor/github.com/prometheus/procfs/vm.go deleted file mode 100644 index cdedcae99..000000000 --- a/vendor/github.com/prometheus/procfs/vm.go +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !windows -// +build !windows - -package procfs - -import ( - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// The VM interface is described at -// -// https://www.kernel.org/doc/Documentation/sysctl/vm.txt -// -// Each setting is exposed as a single file. -// Each file contains one line with a single numerical value, except lowmem_reserve_ratio which holds an array -// and numa_zonelist_order (deprecated) which is a string. -type VM struct { - AdminReserveKbytes *int64 // /proc/sys/vm/admin_reserve_kbytes - BlockDump *int64 // /proc/sys/vm/block_dump - CompactUnevictableAllowed *int64 // /proc/sys/vm/compact_unevictable_allowed - DirtyBackgroundBytes *int64 // /proc/sys/vm/dirty_background_bytes - DirtyBackgroundRatio *int64 // /proc/sys/vm/dirty_background_ratio - DirtyBytes *int64 // /proc/sys/vm/dirty_bytes - DirtyExpireCentisecs *int64 // /proc/sys/vm/dirty_expire_centisecs - DirtyRatio *int64 // /proc/sys/vm/dirty_ratio - DirtytimeExpireSeconds *int64 // /proc/sys/vm/dirtytime_expire_seconds - DirtyWritebackCentisecs *int64 // /proc/sys/vm/dirty_writeback_centisecs - DropCaches *int64 // /proc/sys/vm/drop_caches - ExtfragThreshold *int64 // /proc/sys/vm/extfrag_threshold - HugetlbShmGroup *int64 // /proc/sys/vm/hugetlb_shm_group - LaptopMode *int64 // /proc/sys/vm/laptop_mode - LegacyVaLayout *int64 // /proc/sys/vm/legacy_va_layout - LowmemReserveRatio []*int64 // /proc/sys/vm/lowmem_reserve_ratio - MaxMapCount *int64 // /proc/sys/vm/max_map_count - MemoryFailureEarlyKill *int64 // /proc/sys/vm/memory_failure_early_kill - MemoryFailureRecovery *int64 // /proc/sys/vm/memory_failure_recovery - MinFreeKbytes *int64 // /proc/sys/vm/min_free_kbytes - MinSlabRatio *int64 // /proc/sys/vm/min_slab_ratio - MinUnmappedRatio *int64 // /proc/sys/vm/min_unmapped_ratio - MmapMinAddr *int64 // /proc/sys/vm/mmap_min_addr - NrHugepages *int64 // /proc/sys/vm/nr_hugepages - NrHugepagesMempolicy *int64 // /proc/sys/vm/nr_hugepages_mempolicy - NrOvercommitHugepages *int64 // /proc/sys/vm/nr_overcommit_hugepages - NumaStat *int64 // /proc/sys/vm/numa_stat - NumaZonelistOrder string // /proc/sys/vm/numa_zonelist_order - OomDumpTasks *int64 // /proc/sys/vm/oom_dump_tasks - OomKillAllocatingTask *int64 // 
/proc/sys/vm/oom_kill_allocating_task - OvercommitKbytes *int64 // /proc/sys/vm/overcommit_kbytes - OvercommitMemory *int64 // /proc/sys/vm/overcommit_memory - OvercommitRatio *int64 // /proc/sys/vm/overcommit_ratio - PageCluster *int64 // /proc/sys/vm/page-cluster - PanicOnOom *int64 // /proc/sys/vm/panic_on_oom - PercpuPagelistFraction *int64 // /proc/sys/vm/percpu_pagelist_fraction - StatInterval *int64 // /proc/sys/vm/stat_interval - Swappiness *int64 // /proc/sys/vm/swappiness - UserReserveKbytes *int64 // /proc/sys/vm/user_reserve_kbytes - VfsCachePressure *int64 // /proc/sys/vm/vfs_cache_pressure - WatermarkBoostFactor *int64 // /proc/sys/vm/watermark_boost_factor - WatermarkScaleFactor *int64 // /proc/sys/vm/watermark_scale_factor - ZoneReclaimMode *int64 // /proc/sys/vm/zone_reclaim_mode -} - -// VM reads the VM statistics from the specified `proc` filesystem. -func (fs FS) VM() (*VM, error) { - path := fs.proc.Path("sys/vm") - file, err := os.Stat(path) - if err != nil { - return nil, err - } - if !file.Mode().IsDir() { - return nil, fmt.Errorf("%s is not a directory", path) - } - - files, err := os.ReadDir(path) - if err != nil { - return nil, err - } - - var vm VM - for _, f := range files { - if f.IsDir() { - continue - } - - name := filepath.Join(path, f.Name()) - // ignore errors on read, as there are some write only - // in /proc/sys/vm - value, err := util.SysReadFile(name) - if err != nil { - continue - } - vp := util.NewValueParser(value) - - switch f.Name() { - case "admin_reserve_kbytes": - vm.AdminReserveKbytes = vp.PInt64() - case "block_dump": - vm.BlockDump = vp.PInt64() - case "compact_unevictable_allowed": - vm.CompactUnevictableAllowed = vp.PInt64() - case "dirty_background_bytes": - vm.DirtyBackgroundBytes = vp.PInt64() - case "dirty_background_ratio": - vm.DirtyBackgroundRatio = vp.PInt64() - case "dirty_bytes": - vm.DirtyBytes = vp.PInt64() - case "dirty_expire_centisecs": - vm.DirtyExpireCentisecs = vp.PInt64() - case "dirty_ratio": - vm.DirtyRatio = vp.PInt64() - case "dirtytime_expire_seconds": - vm.DirtytimeExpireSeconds = vp.PInt64() - case "dirty_writeback_centisecs": - vm.DirtyWritebackCentisecs = vp.PInt64() - case "drop_caches": - vm.DropCaches = vp.PInt64() - case "extfrag_threshold": - vm.ExtfragThreshold = vp.PInt64() - case "hugetlb_shm_group": - vm.HugetlbShmGroup = vp.PInt64() - case "laptop_mode": - vm.LaptopMode = vp.PInt64() - case "legacy_va_layout": - vm.LegacyVaLayout = vp.PInt64() - case "lowmem_reserve_ratio": - stringSlice := strings.Fields(value) - pint64Slice := make([]*int64, 0, len(stringSlice)) - for _, value := range stringSlice { - vp := util.NewValueParser(value) - pint64Slice = append(pint64Slice, vp.PInt64()) - } - vm.LowmemReserveRatio = pint64Slice - case "max_map_count": - vm.MaxMapCount = vp.PInt64() - case "memory_failure_early_kill": - vm.MemoryFailureEarlyKill = vp.PInt64() - case "memory_failure_recovery": - vm.MemoryFailureRecovery = vp.PInt64() - case "min_free_kbytes": - vm.MinFreeKbytes = vp.PInt64() - case "min_slab_ratio": - vm.MinSlabRatio = vp.PInt64() - case "min_unmapped_ratio": - vm.MinUnmappedRatio = vp.PInt64() - case "mmap_min_addr": - vm.MmapMinAddr = vp.PInt64() - case "nr_hugepages": - vm.NrHugepages = vp.PInt64() - case "nr_hugepages_mempolicy": - vm.NrHugepagesMempolicy = vp.PInt64() - case "nr_overcommit_hugepages": - vm.NrOvercommitHugepages = vp.PInt64() - case "numa_stat": - vm.NumaStat = vp.PInt64() - case "numa_zonelist_order": - vm.NumaZonelistOrder = value - case "oom_dump_tasks": - 
vm.OomDumpTasks = vp.PInt64() - case "oom_kill_allocating_task": - vm.OomKillAllocatingTask = vp.PInt64() - case "overcommit_kbytes": - vm.OvercommitKbytes = vp.PInt64() - case "overcommit_memory": - vm.OvercommitMemory = vp.PInt64() - case "overcommit_ratio": - vm.OvercommitRatio = vp.PInt64() - case "page-cluster": - vm.PageCluster = vp.PInt64() - case "panic_on_oom": - vm.PanicOnOom = vp.PInt64() - case "percpu_pagelist_fraction": - vm.PercpuPagelistFraction = vp.PInt64() - case "stat_interval": - vm.StatInterval = vp.PInt64() - case "swappiness": - vm.Swappiness = vp.PInt64() - case "user_reserve_kbytes": - vm.UserReserveKbytes = vp.PInt64() - case "vfs_cache_pressure": - vm.VfsCachePressure = vp.PInt64() - case "watermark_boost_factor": - vm.WatermarkBoostFactor = vp.PInt64() - case "watermark_scale_factor": - vm.WatermarkScaleFactor = vp.PInt64() - case "zone_reclaim_mode": - vm.ZoneReclaimMode = vp.PInt64() - } - if err := vp.Err(); err != nil { - return nil, err - } - } - - return &vm, nil -} diff --git a/vendor/github.com/prometheus/procfs/zoneinfo.go b/vendor/github.com/prometheus/procfs/zoneinfo.go deleted file mode 100644 index c745a4c04..000000000 --- a/vendor/github.com/prometheus/procfs/zoneinfo.go +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !windows -// +build !windows - -package procfs - -import ( - "bytes" - "fmt" - "os" - "regexp" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// Zoneinfo holds info parsed from /proc/zoneinfo. -type Zoneinfo struct { - Node string - Zone string - NrFreePages *int64 - Min *int64 - Low *int64 - High *int64 - Scanned *int64 - Spanned *int64 - Present *int64 - Managed *int64 - NrActiveAnon *int64 - NrInactiveAnon *int64 - NrIsolatedAnon *int64 - NrAnonPages *int64 - NrAnonTransparentHugepages *int64 - NrActiveFile *int64 - NrInactiveFile *int64 - NrIsolatedFile *int64 - NrFilePages *int64 - NrSlabReclaimable *int64 - NrSlabUnreclaimable *int64 - NrMlockStack *int64 - NrKernelStack *int64 - NrMapped *int64 - NrDirty *int64 - NrWriteback *int64 - NrUnevictable *int64 - NrShmem *int64 - NrDirtied *int64 - NrWritten *int64 - NumaHit *int64 - NumaMiss *int64 - NumaForeign *int64 - NumaInterleave *int64 - NumaLocal *int64 - NumaOther *int64 - Protection []*int64 -} - -var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`) - -// Zoneinfo parses an zoneinfo-file (/proc/zoneinfo) and returns a slice of -// structs containing the relevant info. 
More information available here: -// https://www.kernel.org/doc/Documentation/sysctl/vm.txt -func (fs FS) Zoneinfo() ([]Zoneinfo, error) { - data, err := os.ReadFile(fs.proc.Path("zoneinfo")) - if err != nil { - return nil, fmt.Errorf("error reading zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err) - } - zoneinfo, err := parseZoneinfo(data) - if err != nil { - return nil, fmt.Errorf("error parsing zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err) - } - return zoneinfo, nil -} - -func parseZoneinfo(zoneinfoData []byte) ([]Zoneinfo, error) { - - zoneinfo := []Zoneinfo{} - - zoneinfoBlocks := bytes.Split(zoneinfoData, []byte("\nNode")) - for _, block := range zoneinfoBlocks { - var zoneinfoElement Zoneinfo - lines := strings.Split(string(block), "\n") - for _, line := range lines { - - if nodeZone := nodeZoneRE.FindStringSubmatch(line); nodeZone != nil { - zoneinfoElement.Node = nodeZone[1] - zoneinfoElement.Zone = nodeZone[2] - continue - } - if strings.HasPrefix(strings.TrimSpace(line), "per-node stats") { - continue - } - parts := strings.Fields(strings.TrimSpace(line)) - if len(parts) < 2 { - continue - } - vp := util.NewValueParser(parts[1]) - switch parts[0] { - case "nr_free_pages": - zoneinfoElement.NrFreePages = vp.PInt64() - case "min": - zoneinfoElement.Min = vp.PInt64() - case "low": - zoneinfoElement.Low = vp.PInt64() - case "high": - zoneinfoElement.High = vp.PInt64() - case "scanned": - zoneinfoElement.Scanned = vp.PInt64() - case "spanned": - zoneinfoElement.Spanned = vp.PInt64() - case "present": - zoneinfoElement.Present = vp.PInt64() - case "managed": - zoneinfoElement.Managed = vp.PInt64() - case "nr_active_anon": - zoneinfoElement.NrActiveAnon = vp.PInt64() - case "nr_inactive_anon": - zoneinfoElement.NrInactiveAnon = vp.PInt64() - case "nr_isolated_anon": - zoneinfoElement.NrIsolatedAnon = vp.PInt64() - case "nr_anon_pages": - zoneinfoElement.NrAnonPages = vp.PInt64() - case "nr_anon_transparent_hugepages": - zoneinfoElement.NrAnonTransparentHugepages = vp.PInt64() - case "nr_active_file": - zoneinfoElement.NrActiveFile = vp.PInt64() - case "nr_inactive_file": - zoneinfoElement.NrInactiveFile = vp.PInt64() - case "nr_isolated_file": - zoneinfoElement.NrIsolatedFile = vp.PInt64() - case "nr_file_pages": - zoneinfoElement.NrFilePages = vp.PInt64() - case "nr_slab_reclaimable": - zoneinfoElement.NrSlabReclaimable = vp.PInt64() - case "nr_slab_unreclaimable": - zoneinfoElement.NrSlabUnreclaimable = vp.PInt64() - case "nr_mlock_stack": - zoneinfoElement.NrMlockStack = vp.PInt64() - case "nr_kernel_stack": - zoneinfoElement.NrKernelStack = vp.PInt64() - case "nr_mapped": - zoneinfoElement.NrMapped = vp.PInt64() - case "nr_dirty": - zoneinfoElement.NrDirty = vp.PInt64() - case "nr_writeback": - zoneinfoElement.NrWriteback = vp.PInt64() - case "nr_unevictable": - zoneinfoElement.NrUnevictable = vp.PInt64() - case "nr_shmem": - zoneinfoElement.NrShmem = vp.PInt64() - case "nr_dirtied": - zoneinfoElement.NrDirtied = vp.PInt64() - case "nr_written": - zoneinfoElement.NrWritten = vp.PInt64() - case "numa_hit": - zoneinfoElement.NumaHit = vp.PInt64() - case "numa_miss": - zoneinfoElement.NumaMiss = vp.PInt64() - case "numa_foreign": - zoneinfoElement.NumaForeign = vp.PInt64() - case "numa_interleave": - zoneinfoElement.NumaInterleave = vp.PInt64() - case "numa_local": - zoneinfoElement.NumaLocal = vp.PInt64() - case "numa_other": - zoneinfoElement.NumaOther = vp.PInt64() - case "protection:": - protectionParts := strings.Split(line, ":") - protectionValues := 
strings.Replace(protectionParts[1], "(", "", 1) - protectionValues = strings.Replace(protectionValues, ")", "", 1) - protectionValues = strings.TrimSpace(protectionValues) - protectionStringMap := strings.Split(protectionValues, ", ") - val, err := util.ParsePInt64s(protectionStringMap) - if err == nil { - zoneinfoElement.Protection = val - } - } - - } - - zoneinfo = append(zoneinfo, zoneinfoElement) - } - return zoneinfo, nil -} diff --git a/vendor/github.com/shirou/gopsutil/v3/internal/common/sleep.go b/vendor/github.com/shirou/gopsutil/v3/internal/common/sleep.go index 8c35b1722..9bed2419e 100644 --- a/vendor/github.com/shirou/gopsutil/v3/internal/common/sleep.go +++ b/vendor/github.com/shirou/gopsutil/v3/internal/common/sleep.go @@ -11,6 +11,9 @@ func Sleep(ctx context.Context, interval time.Duration) error { timer := time.NewTimer(interval) select { case <-ctx.Done(): + if !timer.Stop() { + <-timer.C + } return ctx.Err() case <-timer.C: return nil diff --git a/vendor/github.com/shirou/gopsutil/v3/mem/mem_linux.go b/vendor/github.com/shirou/gopsutil/v3/mem/mem_linux.go index 9a5d693b1..059518dc5 100644 --- a/vendor/github.com/shirou/gopsutil/v3/mem/mem_linux.go +++ b/vendor/github.com/shirou/gopsutil/v3/mem/mem_linux.go @@ -14,8 +14,9 @@ import ( "strconv" "strings" - "github.com/shirou/gopsutil/v3/internal/common" "golang.org/x/sys/unix" + + "github.com/shirou/gopsutil/v3/internal/common" ) type VirtualMemoryExStat struct { @@ -153,13 +154,13 @@ func fillFromMeminfoWithContext() (*VirtualMemoryStat, *VirtualMemoryExStat, err return ret, retEx, err } retEx.Unevictable = t * 1024 - case "WriteBack": + case "Writeback": t, err := strconv.ParseUint(value, 10, 64) if err != nil { return ret, retEx, err } ret.WriteBack = t * 1024 - case "WriteBackTmp": + case "WritebackTmp": t, err := strconv.ParseUint(value, 10, 64) if err != nil { return ret, retEx, err diff --git a/vendor/github.com/vishvananda/netlink/chain.go b/vendor/github.com/vishvananda/netlink/chain.go new file mode 100644 index 000000000..1d1c144e9 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/chain.go @@ -0,0 +1,22 @@ +package netlink + +import ( + "fmt" +) + +// Chain contains the attributes of a Chain +type Chain struct { + Parent uint32 + Chain uint32 +} + +func (c Chain) String() string { + return fmt.Sprintf("{Parent: %d, Chain: %d}", c.Parent, c.Chain) +} + +func NewChain(parent uint32, chain uint32) Chain { + return Chain{ + Parent: parent, + Chain: chain, + } +} diff --git a/vendor/github.com/vishvananda/netlink/chain_linux.go b/vendor/github.com/vishvananda/netlink/chain_linux.go new file mode 100644 index 000000000..d9f441613 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/chain_linux.go @@ -0,0 +1,112 @@ +package netlink + +import ( + "github.com/vishvananda/netlink/nl" + "golang.org/x/sys/unix" +) + +// ChainDel will delete a chain from the system. +func ChainDel(link Link, chain Chain) error { + // Equivalent to: `tc chain del $chain` + return pkgHandle.ChainDel(link, chain) +} + +// ChainDel will delete a chain from the system. +// Equivalent to: `tc chain del $chain` +func (h *Handle) ChainDel(link Link, chain Chain) error { + return h.chainModify(unix.RTM_DELCHAIN, 0, link, chain) +} + +// ChainAdd will add a chain to the system. +// Equivalent to: `tc chain add` +func ChainAdd(link Link, chain Chain) error { + return pkgHandle.ChainAdd(link, chain) +} + +// ChainAdd will add a chain to the system. 
+// Equivalent to: `tc chain add` +func (h *Handle) ChainAdd(link Link, chain Chain) error { + return h.chainModify( + unix.RTM_NEWCHAIN, + unix.NLM_F_CREATE|unix.NLM_F_EXCL, + link, + chain) +} + +func (h *Handle) chainModify(cmd, flags int, link Link, chain Chain) error { + req := h.newNetlinkRequest(cmd, flags|unix.NLM_F_ACK) + index := int32(0) + if link != nil { + base := link.Attrs() + h.ensureIndex(base) + index = int32(base.Index) + } + msg := &nl.TcMsg{ + Family: nl.FAMILY_ALL, + Ifindex: index, + Parent: chain.Parent, + } + req.AddData(msg) + req.AddData(nl.NewRtAttr(nl.TCA_CHAIN, nl.Uint32Attr(chain.Chain))) + + _, err := req.Execute(unix.NETLINK_ROUTE, 0) + return err +} + +// ChainList gets a list of chains in the system. +// Equivalent to: `tc chain list`. +// The list can be filtered by link. +func ChainList(link Link, parent uint32) ([]Chain, error) { + return pkgHandle.ChainList(link, parent) +} + +// ChainList gets a list of chains in the system. +// Equivalent to: `tc chain list`. +// The list can be filtered by link. +func (h *Handle) ChainList(link Link, parent uint32) ([]Chain, error) { + req := h.newNetlinkRequest(unix.RTM_GETCHAIN, unix.NLM_F_DUMP) + index := int32(0) + if link != nil { + base := link.Attrs() + h.ensureIndex(base) + index = int32(base.Index) + } + msg := &nl.TcMsg{ + Family: nl.FAMILY_ALL, + Ifindex: index, + Parent: parent, + } + req.AddData(msg) + + msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWCHAIN) + if err != nil { + return nil, err + } + + var res []Chain + for _, m := range msgs { + msg := nl.DeserializeTcMsg(m) + + attrs, err := nl.ParseRouteAttr(m[msg.Len():]) + if err != nil { + return nil, err + } + + // skip chains from other interfaces + if link != nil && msg.Ifindex != index { + continue + } + + var chain Chain + for _, attr := range attrs { + switch attr.Attr.Type { + case nl.TCA_CHAIN: + chain.Chain = native.Uint32(attr.Value) + chain.Parent = parent + } + } + res = append(res, chain) + } + + return res, nil +} diff --git a/vendor/github.com/vishvananda/netlink/conntrack_linux.go b/vendor/github.com/vishvananda/netlink/conntrack_linux.go index 03ea1b98f..67b92e229 100644 --- a/vendor/github.com/vishvananda/netlink/conntrack_linux.go +++ b/vendor/github.com/vishvananda/netlink/conntrack_linux.go @@ -149,20 +149,26 @@ type ConntrackFlow struct { TimeStart uint64 TimeStop uint64 TimeOut uint32 + Labels []byte } func (s *ConntrackFlow) String() string { // conntrack cmd output: - // udp 17 src=127.0.0.1 dst=127.0.0.1 sport=4001 dport=1234 packets=5 bytes=532 [UNREPLIED] src=127.0.0.1 dst=127.0.0.1 sport=1234 dport=4001 packets=10 bytes=1078 mark=0 + // udp 17 src=127.0.0.1 dst=127.0.0.1 sport=4001 dport=1234 packets=5 bytes=532 [UNREPLIED] src=127.0.0.1 dst=127.0.0.1 sport=1234 dport=4001 packets=10 bytes=1078 mark=0 labels=0x00000000050012ac4202010000000000 // start=2019-07-26 01:26:21.557800506 +0000 UTC stop=1970-01-01 00:00:00 +0000 UTC timeout=30(sec) start := time.Unix(0, int64(s.TimeStart)) stop := time.Unix(0, int64(s.TimeStop)) timeout := int32(s.TimeOut) - return fmt.Sprintf("%s\t%d src=%s dst=%s sport=%d dport=%d packets=%d bytes=%d\tsrc=%s dst=%s sport=%d dport=%d packets=%d bytes=%d mark=0x%x start=%v stop=%v timeout=%d(sec)", + res := fmt.Sprintf("%s\t%d src=%s dst=%s sport=%d dport=%d packets=%d bytes=%d\tsrc=%s dst=%s sport=%d dport=%d packets=%d bytes=%d mark=0x%x ", nl.L4ProtoMap[s.Forward.Protocol], s.Forward.Protocol, s.Forward.SrcIP.String(), s.Forward.DstIP.String(), s.Forward.SrcPort, s.Forward.DstPort, 
s.Forward.Packets, s.Forward.Bytes, s.Reverse.SrcIP.String(), s.Reverse.DstIP.String(), s.Reverse.SrcPort, s.Reverse.DstPort, s.Reverse.Packets, s.Reverse.Bytes, - s.Mark, start, stop, timeout) + s.Mark) + if len(s.Labels) > 0 { + res += fmt.Sprintf("labels=0x%x ", s.Labels) + } + res += fmt.Sprintf("start=%v stop=%v timeout=%d(sec)", start, stop, timeout) + return res } // This method parse the ip tuple structure @@ -306,6 +312,12 @@ func parseConnectionMark(r *bytes.Reader) (mark uint32) { return } +func parseConnectionLabels(r *bytes.Reader) (label []byte) { + label = make([]byte, 16) // netfilter defines 128 bit labels value + binary.Read(r, nl.NativeEndian(), &label) + return +} + func parseRawData(data []byte) *ConntrackFlow { s := &ConntrackFlow{} // First there is the Nfgenmsg header @@ -351,6 +363,8 @@ func parseRawData(data []byte) *ConntrackFlow { switch t { case nl.CTA_MARK: s.Mark = parseConnectionMark(reader) + case nl.CTA_LABELS: + s.Labels = parseConnectionLabels(reader) case nl.CTA_TIMEOUT: s.TimeOut = parseTimeOut(reader) case nl.CTA_STATUS, nl.CTA_USE, nl.CTA_ID: @@ -406,6 +420,8 @@ const ( ConntrackReplyAnyIP // Match source or destination reply IP ConntrackOrigSrcPort // --orig-port-src port Source port in original direction ConntrackOrigDstPort // --orig-port-dst port Destination port in original direction + ConntrackMatchLabels // --label label1,label2 Labels used in entry + ConntrackUnmatchLabels // --label label1,label2 Labels not used in entry ConntrackNatSrcIP = ConntrackReplySrcIP // deprecated use instead ConntrackReplySrcIP ConntrackNatDstIP = ConntrackReplyDstIP // deprecated use instead ConntrackReplyDstIP ConntrackNatAnyIP = ConntrackReplyAnyIP // deprecated use instead ConntrackReplyAnyIP @@ -421,6 +437,7 @@ type ConntrackFilter struct { ipNetFilter map[ConntrackFilterType]*net.IPNet portFilter map[ConntrackFilterType]uint16 protoFilter uint8 + labelFilter map[ConntrackFilterType][][]byte } // AddIPNet adds a IP subnet to the conntrack filter @@ -474,10 +491,34 @@ func (f *ConntrackFilter) AddProtocol(proto uint8) error { return nil } +// AddLabels adds the provided list (zero or more) of labels to the conntrack filter +// ConntrackFilterType here can be either: +// 1) ConntrackMatchLabels: This matches every flow that has a label value (len(flow.Labels) > 0) +// against the list of provided labels. If `flow.Labels` contains ALL the provided labels +// it is considered a match. This can be used when you want to match flows that contain +// one or more labels. +// 2) ConntrackUnmatchLabels: This matches every flow that has a label value (len(flow.Labels) > 0) +// against the list of provided labels. If `flow.Labels` does NOT contain ALL the provided labels +// it is considered a match. This can be used when you want to match flows that don't contain +// one or more labels. 
+func (f *ConntrackFilter) AddLabels(tp ConntrackFilterType, labels [][]byte) error { + if len(labels) == 0 { + return errors.New("Invalid length for provided labels") + } + if f.labelFilter == nil { + f.labelFilter = make(map[ConntrackFilterType][][]byte) + } + if _, ok := f.labelFilter[tp]; ok { + return errors.New("Filter attribute already present") + } + f.labelFilter[tp] = labels + return nil +} + // MatchConntrackFlow applies the filter to the flow and returns true if the flow matches the filter // false otherwise func (f *ConntrackFilter) MatchConntrackFlow(flow *ConntrackFlow) bool { - if len(f.ipNetFilter) == 0 && len(f.portFilter) == 0 && f.protoFilter == 0 { + if len(f.ipNetFilter) == 0 && len(f.portFilter) == 0 && f.protoFilter == 0 && len(f.labelFilter) == 0 { // empty filter always not match return false } @@ -531,6 +572,29 @@ func (f *ConntrackFilter) MatchConntrackFlow(flow *ConntrackFlow) bool { } } + // Label filter + if len(f.labelFilter) > 0 { + if len(flow.Labels) > 0 { + // --label label1,label2 in conn entry; + // every label passed should be contained in flow.Labels for a match to be true + if elem, found := f.labelFilter[ConntrackMatchLabels]; match && found { + for _, label := range elem { + match = match && (bytes.Contains(flow.Labels, label)) + } + } + // --label label1,label2 in conn entry; + // every label passed should be not contained in flow.Labels for a match to be true + if elem, found := f.labelFilter[ConntrackUnmatchLabels]; match && found { + for _, label := range elem { + match = match && !(bytes.Contains(flow.Labels, label)) + } + } + } else { + // flow doesn't contain labels, so it doesn't contain or notContain any provided matches + match = false + } + } + return match } diff --git a/vendor/github.com/vishvananda/netlink/filter.go b/vendor/github.com/vishvananda/netlink/filter.go index 2d798b0fb..fe4181335 100644 --- a/vendor/github.com/vishvananda/netlink/filter.go +++ b/vendor/github.com/vishvananda/netlink/filter.go @@ -19,6 +19,7 @@ type FilterAttrs struct { Parent uint32 Priority uint16 // lower is higher priority Protocol uint16 // unix.ETH_P_* + Chain *uint32 } func (q FilterAttrs) String() string { @@ -27,6 +28,11 @@ func (q FilterAttrs) String() string { type TcAct int32 +const ( + TC_ACT_EXT_SHIFT = 28 + TC_ACT_EXT_VAL_MASK = (1 << TC_ACT_EXT_SHIFT) - 1 +) + const ( TC_ACT_UNSPEC TcAct = -1 TC_ACT_OK TcAct = 0 @@ -40,6 +46,22 @@ const ( TC_ACT_JUMP TcAct = 0x10000000 ) +func getTcActExt(local int32) int32 { + return local << TC_ACT_EXT_SHIFT +} + +func getTcActGotoChain() TcAct { + return TcAct(getTcActExt(2)) +} + +func getTcActExtOpcode(combined int32) int32 { + return combined & (^TC_ACT_EXT_VAL_MASK) +} + +func TcActExtCmp(combined int32, opcode int32) bool { + return getTcActExtOpcode(combined) == opcode +} + func (a TcAct) String() string { switch a { case TC_ACT_UNSPEC: @@ -63,6 +85,9 @@ func (a TcAct) String() string { case TC_ACT_JUMP: return "jump" } + if TcActExtCmp(int32(a), int32(getTcActGotoChain())) { + return "goto" + } return fmt.Sprintf("0x%x", int32(a)) } @@ -112,6 +137,7 @@ type Action interface { type GenericAction struct { ActionAttrs + Chain int32 } func (action *GenericAction) Type() string { @@ -275,6 +301,7 @@ type SkbEditAction struct { PType *uint16 Priority *uint32 Mark *uint32 + Mask *uint32 } func (action *SkbEditAction) Type() string { @@ -348,6 +375,7 @@ type FwFilter struct { InDev string Mask uint32 Police *PoliceAction + Actions []Action } func (filter *FwFilter) Attrs() *FilterAttrs { diff --git 
a/vendor/github.com/vishvananda/netlink/filter_linux.go b/vendor/github.com/vishvananda/netlink/filter_linux.go index 4c6d1cf7d..fbd434aa9 100644 --- a/vendor/github.com/vishvananda/netlink/filter_linux.go +++ b/vendor/github.com/vishvananda/netlink/filter_linux.go @@ -64,6 +64,11 @@ type Flower struct { EncSrcIPMask net.IPMask EncDestPort uint16 EncKeyId uint32 + SkipHw bool + SkipSw bool + IPProto *nl.IPProto + DestPort uint16 + SrcPort uint16 Actions []Action } @@ -129,6 +134,39 @@ func (filter *Flower) encode(parent *nl.RtAttr) error { if filter.EncKeyId != 0 { parent.AddRtAttr(nl.TCA_FLOWER_KEY_ENC_KEY_ID, htonl(filter.EncKeyId)) } + if filter.IPProto != nil { + ipproto := *filter.IPProto + parent.AddRtAttr(nl.TCA_FLOWER_KEY_IP_PROTO, ipproto.Serialize()) + if filter.SrcPort != 0 { + switch ipproto { + case nl.IPPROTO_TCP: + parent.AddRtAttr(nl.TCA_FLOWER_KEY_TCP_SRC, htons(filter.SrcPort)) + case nl.IPPROTO_UDP: + parent.AddRtAttr(nl.TCA_FLOWER_KEY_UDP_SRC, htons(filter.SrcPort)) + case nl.IPPROTO_SCTP: + parent.AddRtAttr(nl.TCA_FLOWER_KEY_SCTP_SRC, htons(filter.SrcPort)) + } + } + if filter.DestPort != 0 { + switch ipproto { + case nl.IPPROTO_TCP: + parent.AddRtAttr(nl.TCA_FLOWER_KEY_TCP_DST, htons(filter.DestPort)) + case nl.IPPROTO_UDP: + parent.AddRtAttr(nl.TCA_FLOWER_KEY_UDP_DST, htons(filter.DestPort)) + case nl.IPPROTO_SCTP: + parent.AddRtAttr(nl.TCA_FLOWER_KEY_SCTP_DST, htons(filter.DestPort)) + } + } + } + + var flags uint32 = 0 + if filter.SkipHw { + flags |= nl.TCA_CLS_FLAGS_SKIP_HW + } + if filter.SkipSw { + flags |= nl.TCA_CLS_FLAGS_SKIP_SW + } + parent.AddRtAttr(nl.TCA_FLOWER_FLAGS, htonl(flags)) actionsAttr := parent.AddRtAttr(nl.TCA_FLOWER_ACT, nil) if err := EncodeActions(actionsAttr, filter.Actions); err != nil { @@ -162,6 +200,14 @@ func (filter *Flower) decode(data []syscall.NetlinkRouteAttr) error { filter.EncDestPort = ntohs(datum.Value) case nl.TCA_FLOWER_KEY_ENC_KEY_ID: filter.EncKeyId = ntohl(datum.Value) + case nl.TCA_FLOWER_KEY_IP_PROTO: + val := new(nl.IPProto) + *val = nl.IPProto(datum.Value[0]) + filter.IPProto = val + case nl.TCA_FLOWER_KEY_TCP_SRC, nl.TCA_FLOWER_KEY_UDP_SRC, nl.TCA_FLOWER_KEY_SCTP_SRC: + filter.SrcPort = ntohs(datum.Value) + case nl.TCA_FLOWER_KEY_TCP_DST, nl.TCA_FLOWER_KEY_UDP_DST, nl.TCA_FLOWER_KEY_SCTP_DST: + filter.DestPort = ntohs(datum.Value) case nl.TCA_FLOWER_ACT: tables, err := nl.ParseRouteAttr(datum.Value) if err != nil { @@ -171,6 +217,16 @@ func (filter *Flower) decode(data []syscall.NetlinkRouteAttr) error { if err != nil { return err } + case nl.TCA_FLOWER_FLAGS: + attr := nl.DeserializeUint32Bitfield(datum.Value) + skipSw := attr.Value & nl.TCA_CLS_FLAGS_SKIP_HW + skipHw := attr.Value & nl.TCA_CLS_FLAGS_SKIP_SW + if skipSw != 0 { + filter.SkipSw = true + } + if skipHw != 0 { + filter.SkipHw = true + } } } return nil @@ -185,19 +241,7 @@ func FilterDel(filter Filter) error { // FilterDel will delete a filter from the system. // Equivalent to: `tc filter del $filter` func (h *Handle) FilterDel(filter Filter) error { - req := h.newNetlinkRequest(unix.RTM_DELTFILTER, unix.NLM_F_ACK) - base := filter.Attrs() - msg := &nl.TcMsg{ - Family: nl.FAMILY_ALL, - Ifindex: int32(base.LinkIndex), - Handle: base.Handle, - Parent: base.Parent, - Info: MakeHandle(base.Priority, nl.Swap16(base.Protocol)), - } - req.AddData(msg) - - _, err := req.Execute(unix.NETLINK_ROUTE, 0) - return err + return h.filterModify(filter, unix.RTM_DELTFILTER, 0) } // FilterAdd will add a filter to the system. 
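Note on the filter changes above: the filter.go/filter_linux.go hunks add L4 matching (IPProto, SrcPort, DestPort), hardware-offload flags (SkipHw/SkipSw), and a Chain attribute to the Flower classifier, and route FilterDel through the shared filterModify path. A minimal usage sketch follows; the link name "eth0", the assumption that a clsact/ingress qdisc is already installed, and the drop action are illustrative placeholders, not part of this diff.

package main

import (
	"log"

	"github.com/vishvananda/netlink"
	"github.com/vishvananda/netlink/nl"
	"golang.org/x/sys/unix"
)

func main() {
	// "eth0" is a placeholder; a clsact/ingress qdisc is assumed to exist on it.
	link, err := netlink.LinkByName("eth0")
	if err != nil {
		log.Fatal(err)
	}

	proto := nl.IPPROTO_TCP
	filter := &netlink.Flower{
		FilterAttrs: netlink.FilterAttrs{
			LinkIndex: link.Attrs().Index,
			Parent:    netlink.HANDLE_MIN_INGRESS,
			Priority:  1,
			Protocol:  unix.ETH_P_IP,
		},
		IPProto:  &proto, // new field: emits TCA_FLOWER_KEY_IP_PROTO
		DestPort: 443,    // new field: emits TCA_FLOWER_KEY_TCP_DST since IPProto is TCP
		SkipHw:   true,   // new field: sets TCA_CLS_FLAGS_SKIP_HW
		Actions: []netlink.Action{
			// Drop matching packets; any other action would work here.
			&netlink.GenericAction{ActionAttrs: netlink.ActionAttrs{Action: netlink.TC_ACT_SHOT}},
		},
	}

	if err := netlink.FilterAdd(filter); err != nil {
		log.Fatal(err)
	}
	// FilterDel now reuses the same filterModify path, issued as RTM_DELTFILTER.
	if err := netlink.FilterDel(filter); err != nil {
		log.Fatal(err)
	}
}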
@@ -209,7 +253,7 @@ func FilterAdd(filter Filter) error { // FilterAdd will add a filter to the system. // Equivalent to: `tc filter add $filter` func (h *Handle) FilterAdd(filter Filter) error { - return h.filterModify(filter, unix.NLM_F_CREATE|unix.NLM_F_EXCL) + return h.filterModify(filter, unix.RTM_NEWTFILTER, unix.NLM_F_CREATE|unix.NLM_F_EXCL) } // FilterReplace will replace a filter. @@ -221,11 +265,11 @@ func FilterReplace(filter Filter) error { // FilterReplace will replace a filter. // Equivalent to: `tc filter replace $filter` func (h *Handle) FilterReplace(filter Filter) error { - return h.filterModify(filter, unix.NLM_F_CREATE) + return h.filterModify(filter, unix.RTM_NEWTFILTER, unix.NLM_F_CREATE) } -func (h *Handle) filterModify(filter Filter, flags int) error { - req := h.newNetlinkRequest(unix.RTM_NEWTFILTER, flags|unix.NLM_F_ACK) +func (h *Handle) filterModify(filter Filter, proto, flags int) error { + req := h.newNetlinkRequest(proto, flags|unix.NLM_F_ACK) base := filter.Attrs() msg := &nl.TcMsg{ Family: nl.FAMILY_ALL, @@ -235,6 +279,9 @@ func (h *Handle) filterModify(filter Filter, flags int) error { Info: MakeHandle(base.Priority, nl.Swap16(base.Protocol)), } req.AddData(msg) + if filter.Attrs().Chain != nil { + req.AddData(nl.NewRtAttr(nl.TCA_CHAIN, nl.Uint32Attr(*filter.Attrs().Chain))) + } req.AddData(nl.NewRtAttr(nl.TCA_KIND, nl.ZeroTerminated(filter.Type()))) options := nl.NewRtAttr(nl.TCA_OPTIONS, nil) @@ -312,6 +359,10 @@ func (h *Handle) filterModify(filter Filter, flags int) error { native.PutUint32(b, filter.ClassId) options.AddRtAttr(nl.TCA_FW_CLASSID, b) } + actionsAttr := options.AddRtAttr(nl.TCA_FW_ACT, nil) + if err := EncodeActions(actionsAttr, filter.Actions); err != nil { + return err + } case *BpfFilter: var bpfFlags uint32 if filter.ClassId != 0 { @@ -340,7 +391,6 @@ func (h *Handle) filterModify(filter Filter, flags int) error { return err } } - req.AddData(options) _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err @@ -446,6 +496,10 @@ func (h *Handle) FilterList(link Link, parent uint32) ([]Filter, error) { default: detailed = true } + case nl.TCA_CHAIN: + val := new(uint32) + *val = native.Uint32(attr.Value) + base.Chain = val } } // only return the detailed version of the filter @@ -597,6 +651,9 @@ func EncodeActions(attr *nl.RtAttr, actions []Action) error { if action.Mark != nil { aopts.AddRtAttr(nl.TCA_SKBEDIT_MARK, nl.Uint32Attr(*action.Mark)) } + if action.Mask != nil { + aopts.AddRtAttr(nl.TCA_SKBEDIT_MASK, nl.Uint32Attr(*action.Mask)) + } case *ConnmarkAction: table := attr.AddRtAttr(tabIndex, nil) tabIndex++ @@ -739,6 +796,9 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) { case nl.TCA_SKBEDIT_MARK: mark := native.Uint32(adatum.Value[0:4]) action.(*SkbEditAction).Mark = &mark + case nl.TCA_SKBEDIT_MASK: + mask := native.Uint32(adatum.Value[0:4]) + action.(*SkbEditAction).Mask = &mask case nl.TCA_SKBEDIT_PRIORITY: priority := native.Uint32(adatum.Value[0:4]) action.(*SkbEditAction).Priority = &priority @@ -780,6 +840,9 @@ func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) { case nl.TCA_GACT_PARMS: gen := *nl.DeserializeTcGen(adatum.Value) toAttrs(&gen, action.Attrs()) + if action.Attrs().Action.String() == "goto" { + action.(*GenericAction).Chain = TC_ACT_EXT_VAL_MASK & gen.Action + } } case "police": parsePolice(adatum, action.(*PoliceAction)) @@ -855,6 +918,15 @@ func parseFwData(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error) { parsePolice(aattr, &police) } fw.Police = 
&police + case nl.TCA_FW_ACT: + tables, err := nl.ParseRouteAttr(datum.Value) + if err != nil { + return detailed, err + } + fw.Actions, err = parseActions(tables) + if err != nil { + return detailed, err + } } } return detailed, nil diff --git a/vendor/github.com/vishvananda/netlink/ipset_linux.go b/vendor/github.com/vishvananda/netlink/ipset_linux.go index 1f4eae81c..f4c05229f 100644 --- a/vendor/github.com/vishvananda/netlink/ipset_linux.go +++ b/vendor/github.com/vishvananda/netlink/ipset_linux.go @@ -67,11 +67,13 @@ type IpsetCreateOptions struct { Comments bool Skbinfo bool - Revision uint8 - IPFrom net.IP - IPTo net.IP - PortFrom uint16 - PortTo uint16 + Family uint8 + Revision uint8 + IPFrom net.IP + IPTo net.IP + PortFrom uint16 + PortTo uint16 + MaxElements uint32 } // IpsetProtocol returns the ipset protocol version from the kernel @@ -94,6 +96,11 @@ func IpsetFlush(setname string) error { return pkgHandle.IpsetFlush(setname) } +// IpsetSwap swaps two ipsets. +func IpsetSwap(setname, othersetname string) error { + return pkgHandle.IpsetSwap(setname, othersetname) +} + // IpsetList dumps an specific ipset. func IpsetList(setname string) (*IPSetResult, error) { return pkgHandle.IpsetList(setname) @@ -114,6 +121,11 @@ func IpsetDel(setname string, entry *IPSetEntry) error { return pkgHandle.IpsetDel(setname, entry) } +// IpsetTest tests whether an entry is in a set or not. +func IpsetTest(setname string, entry *IPSetEntry) (bool, error) { + return pkgHandle.IpsetTest(setname, entry) +} + func (h *Handle) IpsetProtocol() (protocol uint8, minVersion uint8, err error) { req := h.newIpsetRequest(nl.IPSET_CMD_PROTOCOL) msgs, err := req.Execute(unix.NETLINK_NETFILTER, 0) @@ -153,11 +165,18 @@ func (h *Handle) IpsetCreate(setname, typename string, options IpsetCreateOption data.AddChild(nl.NewRtAttr(nl.IPSET_ATTR_PORT_FROM|int(nl.NLA_F_NET_BYTEORDER), buf[:2])) data.AddChild(nl.NewRtAttr(nl.IPSET_ATTR_PORT_TO|int(nl.NLA_F_NET_BYTEORDER), buf[2:])) default: - family = unix.AF_INET + family = options.Family + if family == 0 { + family = unix.AF_INET + } } req.AddData(nl.NewRtAttr(nl.IPSET_ATTR_FAMILY, nl.Uint8Attr(family))) + if options.MaxElements != 0 { + data.AddChild(&nl.Uint32Attribute{Type: nl.IPSET_ATTR_MAXELEM | nl.NLA_F_NET_BYTEORDER, Value: options.MaxElements}) + } + if timeout := options.Timeout; timeout != nil { data.AddChild(&nl.Uint32Attribute{Type: nl.IPSET_ATTR_TIMEOUT | nl.NLA_F_NET_BYTEORDER, Value: *timeout}) } @@ -197,6 +216,14 @@ func (h *Handle) IpsetFlush(setname string) error { return err } +func (h *Handle) IpsetSwap(setname, othersetname string) error { + req := h.newIpsetRequest(nl.IPSET_CMD_SWAP) + req.AddData(nl.NewRtAttr(nl.IPSET_ATTR_SETNAME, nl.ZeroTerminated(setname))) + req.AddData(nl.NewRtAttr(nl.IPSET_ATTR_TYPENAME, nl.ZeroTerminated(othersetname))) + _, err := ipsetExecute(req) + return err +} + func (h *Handle) IpsetList(name string) (*IPSetResult, error) { req := h.newIpsetRequest(nl.IPSET_CMD_LIST) req.AddData(nl.NewRtAttr(nl.IPSET_ATTR_SETNAME, nl.ZeroTerminated(name))) @@ -236,18 +263,23 @@ func (h *Handle) IpsetDel(setname string, entry *IPSetEntry) error { return h.ipsetAddDel(nl.IPSET_CMD_DEL, setname, entry) } -func (h *Handle) ipsetAddDel(nlCmd int, setname string, entry *IPSetEntry) error { - req := h.newIpsetRequest(nlCmd) - req.AddData(nl.NewRtAttr(nl.IPSET_ATTR_SETNAME, nl.ZeroTerminated(setname))) - - if entry.Comment != "" { - req.AddData(nl.NewRtAttr(nl.IPSET_ATTR_COMMENT, nl.ZeroTerminated(entry.Comment))) +func encodeIP(ip net.IP) 
(*nl.RtAttr, error) { + typ := int(nl.NLA_F_NET_BYTEORDER) + if ip4 := ip.To4(); ip4 != nil { + typ |= nl.IPSET_ATTR_IPADDR_IPV4 + ip = ip4 + } else { + typ |= nl.IPSET_ATTR_IPADDR_IPV6 } + return nl.NewRtAttr(typ, ip), nil +} + +func buildEntryData(entry *IPSetEntry) (*nl.RtAttr, error) { data := nl.NewRtAttr(nl.IPSET_ATTR_DATA|int(nl.NLA_F_NESTED), nil) - if !entry.Replace { - req.Flags |= unix.NLM_F_EXCL + if entry.Comment != "" { + data.AddChild(nl.NewRtAttr(nl.IPSET_ATTR_COMMENT, nl.ZeroTerminated(entry.Comment))) } if entry.Timeout != nil { @@ -255,7 +287,10 @@ func (h *Handle) ipsetAddDel(nlCmd int, setname string, entry *IPSetEntry) error } if entry.IP != nil { - nestedData := nl.NewRtAttr(nl.IPSET_ATTR_IP|int(nl.NLA_F_NET_BYTEORDER), entry.IP) + nestedData, err := encodeIP(entry.IP) + if err != nil { + return nil, err + } data.AddChild(nl.NewRtAttr(nl.IPSET_ATTR_IP|int(nl.NLA_F_NESTED), nestedData.Serialize())) } @@ -268,7 +303,10 @@ func (h *Handle) ipsetAddDel(nlCmd int, setname string, entry *IPSetEntry) error } if entry.IP2 != nil { - nestedData := nl.NewRtAttr(nl.IPSET_ATTR_IP|int(nl.NLA_F_NET_BYTEORDER), entry.IP2) + nestedData, err := encodeIP(entry.IP2) + if err != nil { + return nil, err + } data.AddChild(nl.NewRtAttr(nl.IPSET_ATTR_IP2|int(nl.NLA_F_NESTED), nestedData.Serialize())) } @@ -295,14 +333,53 @@ func (h *Handle) ipsetAddDel(nlCmd int, setname string, entry *IPSetEntry) error if entry.Mark != nil { data.AddChild(&nl.Uint32Attribute{Type: nl.IPSET_ATTR_MARK | nl.NLA_F_NET_BYTEORDER, Value: *entry.Mark}) } + return data, nil +} +func (h *Handle) ipsetAddDel(nlCmd int, setname string, entry *IPSetEntry) error { + req := h.newIpsetRequest(nlCmd) + req.AddData(nl.NewRtAttr(nl.IPSET_ATTR_SETNAME, nl.ZeroTerminated(setname))) + + if !entry.Replace { + req.Flags |= unix.NLM_F_EXCL + } + + data, err := buildEntryData(entry) + if err != nil { + return err + } data.AddChild(&nl.Uint32Attribute{Type: nl.IPSET_ATTR_LINENO | nl.NLA_F_NET_BYTEORDER, Value: 0}) req.AddData(data) - _, err := ipsetExecute(req) + _, err = ipsetExecute(req) return err } +func (h *Handle) IpsetTest(setname string, entry *IPSetEntry) (bool, error) { + req := h.newIpsetRequest(nl.IPSET_CMD_TEST) + req.AddData(nl.NewRtAttr(nl.IPSET_ATTR_SETNAME, nl.ZeroTerminated(setname))) + + if !entry.Replace { + req.Flags |= unix.NLM_F_EXCL + } + + data, err := buildEntryData(entry) + if err != nil { + return false, err + } + req.AddData(data) + + _, err = ipsetExecute(req) + if err != nil { + if err == nl.IPSetError(nl.IPSET_ERR_EXIST) { + // not exist + return false, nil + } + return false, err + } + return true, nil +} + func (h *Handle) newIpsetRequest(cmd int) *nl.NetlinkRequest { req := h.newNetlinkRequest(cmd|(unix.NFNL_SUBSYS_IPSET<<8), nl.GetIpsetFlags(cmd)) @@ -466,7 +543,7 @@ func parseIPSetEntry(data []byte) (entry IPSetEntry) { case nl.IPSET_ATTR_IP | nl.NLA_F_NESTED: for attr := range nl.ParseAttributes(attr.Value) { switch attr.Type { - case nl.IPSET_ATTR_IP: + case nl.IPSET_ATTR_IPADDR_IPV4, nl.IPSET_ATTR_IPADDR_IPV6: entry.IP = net.IP(attr.Value) default: log.Printf("unknown nested ADT attribute from kernel: %+v", attr) @@ -475,7 +552,7 @@ func parseIPSetEntry(data []byte) (entry IPSetEntry) { case nl.IPSET_ATTR_IP2 | nl.NLA_F_NESTED: for attr := range nl.ParseAttributes(attr.Value) { switch attr.Type { - case nl.IPSET_ATTR_IP: + case nl.IPSET_ATTR_IPADDR_IPV4, nl.IPSET_ATTR_IPADDR_IPV6: entry.IP2 = net.IP(attr.Value) default: log.Printf("unknown nested ADT attribute from kernel: %+v", attr) diff 
--git a/vendor/github.com/vishvananda/netlink/link.go b/vendor/github.com/vishvananda/netlink/link.go index e8915a5d8..4963e11a9 100644 --- a/vendor/github.com/vishvananda/netlink/link.go +++ b/vendor/github.com/vishvananda/netlink/link.go @@ -266,6 +266,7 @@ type Bridge struct { AgeingTime *uint32 HelloTime *uint32 VlanFiltering *bool + VlanDefaultPVID *uint16 } func (bridge *Bridge) Attrs() *LinkAttrs { @@ -1166,6 +1167,7 @@ type Gretun struct { EncapFlags uint16 EncapSport uint16 EncapDport uint16 + FlowBased bool } func (gretun *Gretun) Attrs() *LinkAttrs { @@ -1209,6 +1211,7 @@ func (gtp *GTP) Type() string { } // Virtual XFRM Interfaces +// // Named "xfrmi" to prevent confusion with XFRM objects type Xfrmi struct { LinkAttrs diff --git a/vendor/github.com/vishvananda/netlink/link_linux.go b/vendor/github.com/vishvananda/netlink/link_linux.go index 9e66dc50f..524a93bb0 100644 --- a/vendor/github.com/vishvananda/netlink/link_linux.go +++ b/vendor/github.com/vishvananda/netlink/link_linux.go @@ -345,6 +345,16 @@ func (h *Handle) BridgeSetVlanFiltering(link Link, on bool) error { return h.linkModify(bridge, unix.NLM_F_ACK) } +func BridgeSetVlanDefaultPVID(link Link, pvid uint16) error { + return pkgHandle.BridgeSetVlanDefaultPVID(link, pvid) +} + +func (h *Handle) BridgeSetVlanDefaultPVID(link Link, pvid uint16) error { + bridge := link.(*Bridge) + bridge.VlanDefaultPVID = &pvid + return h.linkModify(bridge, unix.NLM_F_ACK) +} + func SetPromiscOn(link Link) error { return pkgHandle.SetPromiscOn(link) } @@ -993,7 +1003,7 @@ func (h *Handle) LinkSetGROMaxSize(link Link, maxSize int) error { b := make([]byte, 4) native.PutUint32(b, uint32(maxSize)) - data := nl.NewRtAttr(nl.IFLA_GRO_MAX_SIZE, b) + data := nl.NewRtAttr(unix.IFLA_GRO_MAX_SIZE, b) req.AddData(data) _, err := req.Execute(unix.NETLINK_ROUTE, 0) @@ -1456,7 +1466,7 @@ func (h *Handle) linkModify(link Link, flags int) error { } if base.GROMaxSize > 0 { - groAttr := nl.NewRtAttr(nl.IFLA_GRO_MAX_SIZE, nl.Uint32Attr(base.GROMaxSize)) + groAttr := nl.NewRtAttr(unix.IFLA_GRO_MAX_SIZE, nl.Uint32Attr(base.GROMaxSize)) req.AddData(groAttr) } @@ -2000,7 +2010,7 @@ func LinkDeserialize(hdr *unix.NlMsghdr, m []byte) (Link, error) { base.GSOMaxSize = native.Uint32(attr.Value[0:4]) case unix.IFLA_GSO_MAX_SEGS: base.GSOMaxSegs = native.Uint32(attr.Value[0:4]) - case nl.IFLA_GRO_MAX_SIZE: + case unix.IFLA_GRO_MAX_SIZE: base.GROMaxSize = native.Uint32(attr.Value[0:4]) case unix.IFLA_VFINFO_LIST: data, err := nl.ParseRouteAttr(attr.Value) @@ -2714,7 +2724,7 @@ func addGretapAttrs(gretap *Gretap, linkInfo *nl.RtAttr) { if gretap.FlowBased { // In flow based mode, no other attributes need to be configured - data.AddRtAttr(nl.IFLA_GRE_COLLECT_METADATA, boolAttr(gretap.FlowBased)) + data.AddRtAttr(nl.IFLA_GRE_COLLECT_METADATA, []byte{}) return } @@ -2797,6 +2807,12 @@ func parseGretapData(link Link, data []syscall.NetlinkRouteAttr) { func addGretunAttrs(gre *Gretun, linkInfo *nl.RtAttr) { data := linkInfo.AddRtAttr(nl.IFLA_INFO_DATA, nil) + if gre.FlowBased { + // In flow based mode, no other attributes need to be configured + data.AddRtAttr(nl.IFLA_GRE_COLLECT_METADATA, []byte{}) + return + } + if ip := gre.Local; ip != nil { if ip.To4() != nil { ip = ip.To4() @@ -2867,6 +2883,8 @@ func parseGretunData(link Link, data []syscall.NetlinkRouteAttr) { gre.EncapSport = ntohs(datum.Value[0:2]) case nl.IFLA_GRE_ENCAP_DPORT: gre.EncapDport = ntohs(datum.Value[0:2]) + case nl.IFLA_GRE_COLLECT_METADATA: + gre.FlowBased = true } } } @@ -3176,6 +3194,9 @@ func 
addBridgeAttrs(bridge *Bridge, linkInfo *nl.RtAttr) { if bridge.VlanFiltering != nil { data.AddRtAttr(nl.IFLA_BR_VLAN_FILTERING, boolToByte(*bridge.VlanFiltering)) } + if bridge.VlanDefaultPVID != nil { + data.AddRtAttr(nl.IFLA_BR_VLAN_DEFAULT_PVID, nl.Uint16Attr(*bridge.VlanDefaultPVID)) + } } func parseBridgeData(bridge Link, data []syscall.NetlinkRouteAttr) { @@ -3194,6 +3215,9 @@ func parseBridgeData(bridge Link, data []syscall.NetlinkRouteAttr) { case nl.IFLA_BR_VLAN_FILTERING: vlanFiltering := datum.Value[0] == 1 br.VlanFiltering = &vlanFiltering + case nl.IFLA_BR_VLAN_DEFAULT_PVID: + vlanDefaultPVID := native.Uint16(datum.Value[0:2]) + br.VlanDefaultPVID = &vlanDefaultPVID } } } diff --git a/vendor/github.com/vishvananda/netlink/neigh_linux.go b/vendor/github.com/vishvananda/netlink/neigh_linux.go index 4c1e76635..4bf63037a 100644 --- a/vendor/github.com/vishvananda/netlink/neigh_linux.go +++ b/vendor/github.com/vishvananda/netlink/neigh_linux.go @@ -370,10 +370,9 @@ func NeighSubscribeWithOptions(ch chan<- NeighUpdate, done <-chan struct{}, opti func neighSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- NeighUpdate, done <-chan struct{}, cberr func(error), listExisting bool) error { s, err := nl.SubscribeAt(newNs, curNs, unix.NETLINK_ROUTE, unix.RTNLGRP_NEIGH) makeRequest := func(family int) error { - req := pkgHandle.newNetlinkRequest(unix.RTM_GETNEIGH, - unix.NLM_F_DUMP) - infmsg := nl.NewIfInfomsg(family) - req.AddData(infmsg) + req := pkgHandle.newNetlinkRequest(unix.RTM_GETNEIGH, unix.NLM_F_DUMP) + ndmsg := &Ndmsg{Family: uint8(family)} + req.AddData(ndmsg) if err := s.Send(req); err != nil { return err } @@ -427,12 +426,12 @@ func neighSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- NeighUpdate, done < continue } if m.Header.Type == unix.NLMSG_ERROR { - error := int32(native.Uint32(m.Data[0:4])) - if error == 0 { + nError := int32(native.Uint32(m.Data[0:4])) + if nError == 0 { continue } if cberr != nil { - cberr(syscall.Errno(-error)) + cberr(syscall.Errno(-nError)) } return } diff --git a/vendor/github.com/vishvananda/netlink/netlink_unspecified.go b/vendor/github.com/vishvananda/netlink/netlink_unspecified.go index c80b05e9e..c64194905 100644 --- a/vendor/github.com/vishvananda/netlink/netlink_unspecified.go +++ b/vendor/github.com/vishvananda/netlink/netlink_unspecified.go @@ -224,6 +224,10 @@ func XfrmPolicyList(family int) ([]XfrmPolicy, error) { return nil, ErrNotImplemented } +func XfrmPolicyGet(policy *XfrmPolicy) (*XfrmPolicy, error) { + return nil, ErrNotImplemented +} + func XfrmStateAdd(policy *XfrmState) error { return ErrNotImplemented } diff --git a/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go b/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go index 183601803..8659cd130 100644 --- a/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go @@ -89,6 +89,7 @@ const ( CTA_USE = 11 CTA_ID = 12 CTA_TIMESTAMP = 20 + CTA_LABELS = 22 ) // enum ctattr_tuple { diff --git a/vendor/github.com/vishvananda/netlink/nl/ipset_linux.go b/vendor/github.com/vishvananda/netlink/nl/ipset_linux.go index a60b4b09d..89dd009df 100644 --- a/vendor/github.com/vishvananda/netlink/nl/ipset_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/ipset_linux.go @@ -88,6 +88,11 @@ const ( SET_ATTR_CREATE_MAX ) +const ( + IPSET_ATTR_IPADDR_IPV4 = 1 + IPSET_ATTR_IPADDR_IPV6 = 2 +) + /* ADT specific attributes */ const ( IPSET_ATTR_ETHER = IPSET_ATTR_CADT_MAX + iota + 1 diff --git 
a/vendor/github.com/vishvananda/netlink/nl/link_linux.go b/vendor/github.com/vishvananda/netlink/nl/link_linux.go index edf1c8071..e10edbc09 100644 --- a/vendor/github.com/vishvananda/netlink/nl/link_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/link_linux.go @@ -718,8 +718,3 @@ const ( IFLA_BAREUDP_MULTIPROTO_MODE IFLA_BAREUDP_MAX = IFLA_BAREUDP_MULTIPROTO_MODE ) - -const ( - IFLA_UNSPEC = iota - IFLA_GRO_MAX_SIZE = 0x3a -) diff --git a/vendor/github.com/vishvananda/netlink/nl/nl_linux.go b/vendor/github.com/vishvananda/netlink/nl/nl_linux.go index 600b942b1..d3e5ed392 100644 --- a/vendor/github.com/vishvananda/netlink/nl/nl_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/nl_linux.go @@ -330,6 +330,19 @@ func NewIfInfomsgChild(parent *RtAttr, family int) *IfInfomsg { return msg } +type Uint32Bitfield struct { + Value uint32 + Selector uint32 +} + +func (a *Uint32Bitfield) Serialize() []byte { + return (*(*[SizeofUint32Bitfield]byte)(unsafe.Pointer(a)))[:] +} + +func DeserializeUint32Bitfield(data []byte) *Uint32Bitfield { + return (*Uint32Bitfield)(unsafe.Pointer(&data[0:SizeofUint32Bitfield][0])) +} + type Uint32Attribute struct { Type uint16 Value uint32 diff --git a/vendor/github.com/vishvananda/netlink/nl/route_linux.go b/vendor/github.com/vishvananda/netlink/nl/route_linux.go index 03c1900ff..c26f3bf91 100644 --- a/vendor/github.com/vishvananda/netlink/nl/route_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/route_linux.go @@ -48,7 +48,9 @@ type RtNexthop struct { } func DeserializeRtNexthop(b []byte) *RtNexthop { - return (*RtNexthop)(unsafe.Pointer(&b[0:unix.SizeofRtNexthop][0])) + return &RtNexthop{ + RtNexthop: *((*unix.RtNexthop)(unsafe.Pointer(&b[0:unix.SizeofRtNexthop][0]))), + } } func (msg *RtNexthop) Len() int { diff --git a/vendor/github.com/vishvananda/netlink/nl/tc_linux.go b/vendor/github.com/vishvananda/netlink/nl/tc_linux.go index 48f292bda..d8b60c2ed 100644 --- a/vendor/github.com/vishvananda/netlink/nl/tc_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/tc_linux.go @@ -2,7 +2,10 @@ package nl import ( "encoding/binary" + "fmt" "unsafe" + + "golang.org/x/sys/unix" ) // LinkLayer @@ -42,7 +45,14 @@ const ( TCA_FCNT TCA_STATS2 TCA_STAB - TCA_MAX = TCA_STAB + TCA_PAD + TCA_DUMP_INVISIBLE + TCA_CHAIN + TCA_HW_OFFLOAD + TCA_INGRESS_BLOCK + TCA_EGRESS_BLOCK + TCA_DUMP_FLAGS + TCA_MAX = TCA_DUMP_FLAGS ) const ( @@ -56,6 +66,12 @@ const ( TCA_ACT_OPTIONS TCA_ACT_INDEX TCA_ACT_STATS + TCA_ACT_PAD + TCA_ACT_COOKIE + TCA_ACT_FLAGS + TCA_ACT_HW_STATS + TCA_ACT_USED_HW_STATS + TCA_ACT_IN_HW_COUNT TCA_ACT_MAX ) @@ -88,7 +104,7 @@ const ( SizeofTcHtbGlob = 0x14 SizeofTcU32Key = 0x10 SizeofTcU32Sel = 0x10 // without keys - SizeofTcGen = 0x14 + SizeofTcGen = 0x16 SizeofTcConnmark = SizeofTcGen + 0x04 SizeofTcCsum = SizeofTcGen + 0x04 SizeofTcMirred = SizeofTcGen + 0x08 @@ -98,6 +114,7 @@ const ( SizeofTcSfqQopt = 0x0b SizeofTcSfqRedStats = 0x18 SizeofTcSfqQoptV1 = SizeofTcSfqQopt + SizeofTcSfqRedStats + 0x1c + SizeofUint32Bitfield = 0x8 ) // struct tcmsg { @@ -804,7 +821,8 @@ const ( TCA_SKBEDIT_MARK TCA_SKBEDIT_PAD TCA_SKBEDIT_PTYPE - TCA_SKBEDIT_MAX = TCA_SKBEDIT_MARK + TCA_SKBEDIT_MASK + TCA_SKBEDIT_MAX ) type TcSkbEdit struct { @@ -1022,6 +1040,9 @@ const ( __TCA_FLOWER_MAX ) +const TCA_CLS_FLAGS_SKIP_HW = 1 << 0 /* don't offload filter to HW */ +const TCA_CLS_FLAGS_SKIP_SW = 1 << 1 /* don't use filter in SW */ + // struct tc_sfq_qopt { // unsigned quantum; /* Bytes per round allocated to flow */ // int perturb_period; /* Period of hash 
perturbation */ @@ -1121,3 +1142,36 @@ func DeserializeTcSfqQoptV1(b []byte) *TcSfqQoptV1 { func (x *TcSfqQoptV1) Serialize() []byte { return (*(*[SizeofTcSfqQoptV1]byte)(unsafe.Pointer(x)))[:] } + +// IPProto represents Flower ip_proto attribute +type IPProto uint8 + +const ( + IPPROTO_TCP IPProto = unix.IPPROTO_TCP + IPPROTO_UDP IPProto = unix.IPPROTO_UDP + IPPROTO_SCTP IPProto = unix.IPPROTO_SCTP + IPPROTO_ICMP IPProto = unix.IPPROTO_ICMP + IPPROTO_ICMPV6 IPProto = unix.IPPROTO_ICMPV6 +) + +func (i IPProto) Serialize() []byte { + arr := make([]byte, 1) + arr[0] = byte(i) + return arr +} + +func (i IPProto) String() string { + switch i { + case IPPROTO_TCP: + return "tcp" + case IPPROTO_UDP: + return "udp" + case IPPROTO_SCTP: + return "sctp" + case IPPROTO_ICMP: + return "icmp" + case IPPROTO_ICMPV6: + return "icmpv6" + } + return fmt.Sprintf("%d", i) +} diff --git a/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go b/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go index dce9073f7..cdb318ba5 100644 --- a/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go +++ b/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go @@ -131,7 +131,15 @@ func (x *XfrmAddress) ToIP() net.IP { return ip } -func (x *XfrmAddress) ToIPNet(prefixlen uint8) *net.IPNet { +// family is only used when x and prefixlen are both 0 +func (x *XfrmAddress) ToIPNet(prefixlen uint8, family uint16) *net.IPNet { + empty := [SizeofXfrmAddress]byte{} + if bytes.Equal(x[:], empty[:]) && prefixlen == 0 { + if family == FAMILY_V6 { + return &net.IPNet{IP: net.ParseIP("::"), Mask: net.CIDRMask(int(prefixlen), 128)} + } + return &net.IPNet{IP: net.ParseIP("0.0.0.0"), Mask: net.CIDRMask(int(prefixlen), 32)} + } ip := x.ToIP() if GetIPFamily(ip) == FAMILY_V4 { return &net.IPNet{IP: ip, Mask: net.CIDRMask(int(prefixlen), 32)} diff --git a/vendor/github.com/vishvananda/netlink/qdisc.go b/vendor/github.com/vishvananda/netlink/qdisc.go index d75e7fdfc..00e93b458 100644 --- a/vendor/github.com/vishvananda/netlink/qdisc.go +++ b/vendor/github.com/vishvananda/netlink/qdisc.go @@ -32,10 +32,11 @@ type Qdisc interface { // has a handle, a parent and a refcnt. The root qdisc of a device should // have parent == HANDLE_ROOT. 
type QdiscAttrs struct { - LinkIndex int - Handle uint32 - Parent uint32 - Refcnt uint32 // read only + LinkIndex int + Handle uint32 + Parent uint32 + Refcnt uint32 // read only + IngressBlock *uint32 } func (q QdiscAttrs) String() string { diff --git a/vendor/github.com/vishvananda/netlink/qdisc_linux.go b/vendor/github.com/vishvananda/netlink/qdisc_linux.go index 9ce9e908e..e477bcd6c 100644 --- a/vendor/github.com/vishvananda/netlink/qdisc_linux.go +++ b/vendor/github.com/vishvananda/netlink/qdisc_linux.go @@ -159,6 +159,9 @@ func (h *Handle) qdiscModify(cmd, flags int, qdisc Qdisc) error { func qdiscPayload(req *nl.NetlinkRequest, qdisc Qdisc) error { req.AddData(nl.NewRtAttr(nl.TCA_KIND, nl.ZeroTerminated(qdisc.Type()))) + if qdisc.Attrs().IngressBlock != nil { + req.AddData(nl.NewRtAttr(nl.TCA_INGRESS_BLOCK, nl.Uint32Attr(*qdisc.Attrs().IngressBlock))) + } options := nl.NewRtAttr(nl.TCA_OPTIONS, nil) @@ -448,6 +451,10 @@ func (h *Handle) QdiscList(link Link) ([]Qdisc, error) { // no options for ingress } + case nl.TCA_INGRESS_BLOCK: + ingressBlock := new(uint32) + *ingressBlock = native.Uint32(attr.Value) + base.IngressBlock = ingressBlock } } *qdisc.Attrs() = base diff --git a/vendor/github.com/vishvananda/netlink/route_linux.go b/vendor/github.com/vishvananda/netlink/route_linux.go index 8da886657..62bb2d468 100644 --- a/vendor/github.com/vishvananda/netlink/route_linux.go +++ b/vendor/github.com/vishvananda/netlink/route_linux.go @@ -41,7 +41,6 @@ func (s Scope) String() string { } } - const ( FLAG_ONLINK NextHopFlag = unix.RTNH_F_ONLINK FLAG_PERVASIVE NextHopFlag = unix.RTNH_F_PERVASIVE @@ -656,7 +655,8 @@ func RouteAdd(route *Route) error { func (h *Handle) RouteAdd(route *Route) error { flags := unix.NLM_F_CREATE | unix.NLM_F_EXCL | unix.NLM_F_ACK req := h.newNetlinkRequest(unix.RTM_NEWROUTE, flags) - return h.routeHandle(route, req, nl.NewRtMsg()) + _, err := h.routeHandle(route, req, nl.NewRtMsg()) + return err } // RouteAppend will append a route to the system. @@ -670,7 +670,8 @@ func RouteAppend(route *Route) error { func (h *Handle) RouteAppend(route *Route) error { flags := unix.NLM_F_CREATE | unix.NLM_F_APPEND | unix.NLM_F_ACK req := h.newNetlinkRequest(unix.RTM_NEWROUTE, flags) - return h.routeHandle(route, req, nl.NewRtMsg()) + _, err := h.routeHandle(route, req, nl.NewRtMsg()) + return err } // RouteAddEcmp will add a route to the system. @@ -682,7 +683,8 @@ func RouteAddEcmp(route *Route) error { func (h *Handle) RouteAddEcmp(route *Route) error { flags := unix.NLM_F_CREATE | unix.NLM_F_ACK req := h.newNetlinkRequest(unix.RTM_NEWROUTE, flags) - return h.routeHandle(route, req, nl.NewRtMsg()) + _, err := h.routeHandle(route, req, nl.NewRtMsg()) + return err } // RouteReplace will add a route to the system. @@ -696,7 +698,8 @@ func RouteReplace(route *Route) error { func (h *Handle) RouteReplace(route *Route) error { flags := unix.NLM_F_CREATE | unix.NLM_F_REPLACE | unix.NLM_F_ACK req := h.newNetlinkRequest(unix.RTM_NEWROUTE, flags) - return h.routeHandle(route, req, nl.NewRtMsg()) + _, err := h.routeHandle(route, req, nl.NewRtMsg()) + return err } // RouteDel will delete a route from the system. 
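Note on the qdisc changes in the hunks above: QdiscAttrs gains an optional IngressBlock, serialized as TCA_INGRESS_BLOCK on add and parsed back in QdiscList, exposing the kernel's shared tc block support. A rough sketch of setting it when creating an ingress qdisc; "eth0" and block ID 22 are placeholders chosen for illustration only.

package main

import (
	"log"

	"github.com/vishvananda/netlink"
)

func main() {
	// "eth0" is a placeholder link name.
	link, err := netlink.LinkByName("eth0")
	if err != nil {
		log.Fatal(err)
	}

	block := uint32(22) // shared block ID, as in: tc qdisc add dev eth0 ingress ingress_block 22
	qdisc := &netlink.Ingress{
		QdiscAttrs: netlink.QdiscAttrs{
			LinkIndex:    link.Attrs().Index,
			Handle:       netlink.MakeHandle(0xffff, 0),
			Parent:       netlink.HANDLE_INGRESS,
			IngressBlock: &block, // new field: only emitted when non-nil
		},
	}
	if err := netlink.QdiscAdd(qdisc); err != nil {
		log.Fatal(err)
	}
}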
@@ -709,12 +712,13 @@ func RouteDel(route *Route) error { // Equivalent to: `ip route del $route` func (h *Handle) RouteDel(route *Route) error { req := h.newNetlinkRequest(unix.RTM_DELROUTE, unix.NLM_F_ACK) - return h.routeHandle(route, req, nl.NewRtDelMsg()) + _, err := h.routeHandle(route, req, nl.NewRtDelMsg()) + return err } -func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg) error { - if (route.Dst == nil || route.Dst.IP == nil) && route.Src == nil && route.Gw == nil && route.MPLSDst == nil { - return fmt.Errorf("one of Dst.IP, Src, or Gw must not be nil") +func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg) ([][]byte, error) { + if req.NlMsghdr.Type != unix.RTM_GETROUTE && (route.Dst == nil || route.Dst.IP == nil) && route.Src == nil && route.Gw == nil && route.MPLSDst == nil { + return nil, fmt.Errorf("Either Dst.IP, Src.IP or Gw must be set") } family := -1 @@ -741,11 +745,11 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg if route.NewDst != nil { if family != -1 && family != route.NewDst.Family() { - return fmt.Errorf("new destination and destination are not the same address family") + return nil, fmt.Errorf("new destination and destination are not the same address family") } buf, err := route.NewDst.Encode() if err != nil { - return err + return nil, err } rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_NEWDST, buf)) } @@ -756,7 +760,7 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_ENCAP_TYPE, buf)) buf, err := route.Encap.Encode() if err != nil { - return err + return nil, err } switch route.Encap.Type() { case nl.LWTUNNEL_ENCAP_BPF: @@ -770,7 +774,7 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg if route.Src != nil { srcFamily := nl.GetIPFamily(route.Src) if family != -1 && family != srcFamily { - return fmt.Errorf("source and destination ip are not the same IP family") + return nil, fmt.Errorf("source and destination ip are not the same IP family") } family = srcFamily var srcData []byte @@ -786,7 +790,7 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg if route.Gw != nil { gwFamily := nl.GetIPFamily(route.Gw) if family != -1 && family != gwFamily { - return fmt.Errorf("gateway, source, and destination ip are not the same IP family") + return nil, fmt.Errorf("gateway, source, and destination ip are not the same IP family") } family = gwFamily var gwData []byte @@ -801,7 +805,7 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg if route.Via != nil { buf, err := route.Via.Encode() if err != nil { - return fmt.Errorf("failed to encode RTA_VIA: %v", err) + return nil, fmt.Errorf("failed to encode RTA_VIA: %v", err) } rtAttrs = append(rtAttrs, nl.NewRtAttr(unix.RTA_VIA, buf)) } @@ -820,7 +824,7 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg if nh.Gw != nil { gwFamily := nl.GetIPFamily(nh.Gw) if family != -1 && family != gwFamily { - return fmt.Errorf("gateway, source, and destination ip are not the same IP family") + return nil, fmt.Errorf("gateway, source, and destination ip are not the same IP family") } if gwFamily == FAMILY_V4 { children = append(children, nl.NewRtAttr(unix.RTA_GATEWAY, []byte(nh.Gw.To4()))) @@ -830,11 +834,11 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg } if nh.NewDst != nil { if family != -1 && family 
!= nh.NewDst.Family() { - return fmt.Errorf("new destination and destination are not the same address family") + return nil, fmt.Errorf("new destination and destination are not the same address family") } buf, err := nh.NewDst.Encode() if err != nil { - return err + return nil, err } children = append(children, nl.NewRtAttr(unix.RTA_NEWDST, buf)) } @@ -844,14 +848,14 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg children = append(children, nl.NewRtAttr(unix.RTA_ENCAP_TYPE, buf)) buf, err := nh.Encap.Encode() if err != nil { - return err + return nil, err } children = append(children, nl.NewRtAttr(unix.RTA_ENCAP, buf)) } if nh.Via != nil { buf, err := nh.Via.Encode() if err != nil { - return err + return nil, err } children = append(children, nl.NewRtAttr(unix.RTA_VIA, buf)) } @@ -968,19 +972,22 @@ func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg msg.Flags = uint32(route.Flags) msg.Scope = uint8(route.Scope) - msg.Family = uint8(family) + // only overwrite family if it was not set in msg + if msg.Family == 0 { + msg.Family = uint8(family) + } req.AddData(msg) for _, attr := range rtAttrs { req.AddData(attr) } - b := make([]byte, 4) - native.PutUint32(b, uint32(route.LinkIndex)) - - req.AddData(nl.NewRtAttr(unix.RTA_OIF, b)) + if (req.NlMsghdr.Type != unix.RTM_GETROUTE) || (req.NlMsghdr.Type == unix.RTM_GETROUTE && route.LinkIndex > 0) { + b := make([]byte, 4) + native.PutUint32(b, uint32(route.LinkIndex)) + req.AddData(nl.NewRtAttr(unix.RTA_OIF, b)) + } - _, err := req.Execute(unix.NETLINK_ROUTE, 0) - return err + return req.Execute(unix.NETLINK_ROUTE, 0) } // RouteList gets a list of routes in the system. @@ -994,13 +1001,13 @@ func RouteList(link Link, family int) ([]Route, error) { // Equivalent to: `ip route show`. // The list can be filtered by link and ip family. func (h *Handle) RouteList(link Link, family int) ([]Route, error) { - var routeFilter *Route + routeFilter := &Route{} if link != nil { - routeFilter = &Route{ - LinkIndex: link.Attrs().Index, - } + routeFilter.LinkIndex = link.Attrs().Index + + return h.RouteListFiltered(family, routeFilter, RT_FILTER_OIF) } - return h.RouteListFiltered(family, routeFilter, RT_FILTER_OIF) + return h.RouteListFiltered(family, routeFilter, 0) } // RouteListFiltered gets a list of routes in the system filtered with specified rules. @@ -1013,11 +1020,9 @@ func RouteListFiltered(family int, filter *Route, filterMask uint64) ([]Route, e // All rules must be defined in RouteFilter struct func (h *Handle) RouteListFiltered(family int, filter *Route, filterMask uint64) ([]Route, error) { req := h.newNetlinkRequest(unix.RTM_GETROUTE, unix.NLM_F_DUMP) - rtmsg := nl.NewRtMsg() + rtmsg := &nl.RtMsg{} rtmsg.Family = uint8(family) - req.AddData(rtmsg) - - msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWROUTE) + msgs, err := h.routeHandle(filter, req, rtmsg) if err != nil { return nil, err } @@ -1295,6 +1300,8 @@ type RouteGetOptions struct { Oif string VrfName string SrcAddr net.IP + UID *uint32 + Mark int } // RouteGetWithOptions gets a route to a specific destination from the host system. 
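RouteGetOptions gains optional UID and Mark fields above (serialized as RTA_UID and RTA_MARK in the following hunk). A minimal, hypothetical sketch of a UID- and fwmark-scoped route lookup; the destination address, UID, and mark values are assumptions for illustration:

package main

import (
	"fmt"
	"log"
	"net"

	"github.com/vishvananda/netlink"
)

func main() {
	uid := uint32(1000) // assumed UID to resolve the route for
	routes, err := netlink.RouteGetWithOptions(net.ParseIP("192.0.2.1"), &netlink.RouteGetOptions{
		UID:  &uid,
		Mark: 0x200, // assumed fwmark; only added to the request when > 0
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range routes {
		fmt.Println(r)
	}
}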
@@ -1381,6 +1388,21 @@ func (h *Handle) RouteGetWithOptions(destination net.IP, options *RouteGetOption req.AddData(nl.NewRtAttr(unix.RTA_SRC, srcAddr)) } + + if options.UID != nil { + uid := *options.UID + b := make([]byte, 4) + native.PutUint32(b, uid) + + req.AddData(nl.NewRtAttr(unix.RTA_UID, b)) + } + + if options.Mark > 0 { + b := make([]byte, 4) + native.PutUint32(b, uint32(options.Mark)) + + req.AddData(nl.NewRtAttr(unix.RTA_MARK, b)) + } } msgs, err := req.Execute(unix.NETLINK_ROUTE, unix.RTM_NEWROUTE) diff --git a/vendor/github.com/vishvananda/netlink/rule.go b/vendor/github.com/vishvananda/netlink/rule.go index 53cd3d4f6..9a6b57dac 100644 --- a/vendor/github.com/vishvananda/netlink/rule.go +++ b/vendor/github.com/vishvananda/netlink/rule.go @@ -26,6 +26,8 @@ type Rule struct { Dport *RulePortRange Sport *RulePortRange IPProto int + UIDRange *RuleUIDRange + Protocol uint8 } func (r Rule) String() string { @@ -66,3 +68,14 @@ type RulePortRange struct { Start uint16 End uint16 } + +// NewRuleUIDRange creates rule uid range. +func NewRuleUIDRange(start, end uint32) *RuleUIDRange { + return &RuleUIDRange{Start: start, End: end} +} + +// RuleUIDRange represents rule uid range. +type RuleUIDRange struct { + Start uint32 + End uint32 +} diff --git a/vendor/github.com/vishvananda/netlink/rule_linux.go b/vendor/github.com/vishvananda/netlink/rule_linux.go index 3ae213880..69f53e48d 100644 --- a/vendor/github.com/vishvananda/netlink/rule_linux.go +++ b/vendor/github.com/vishvananda/netlink/rule_linux.go @@ -168,6 +168,15 @@ func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error { req.AddData(nl.NewRtAttr(nl.FRA_SPORT_RANGE, b)) } + if rule.UIDRange != nil { + b := rule.UIDRange.toRtAttrData() + req.AddData(nl.NewRtAttr(nl.FRA_UID_RANGE, b)) + } + + if rule.Protocol > 0 { + req.AddData(nl.NewRtAttr(nl.FRA_PROTOCOL, nl.Uint8Attr(rule.Protocol))) + } + _, err := req.Execute(unix.NETLINK_ROUTE, 0) return err } @@ -262,6 +271,10 @@ func (h *Handle) RuleListFiltered(family int, filter *Rule, filterMask uint64) ( rule.Dport = NewRulePortRange(native.Uint16(attrs[j].Value[0:2]), native.Uint16(attrs[j].Value[2:4])) case nl.FRA_SPORT_RANGE: rule.Sport = NewRulePortRange(native.Uint16(attrs[j].Value[0:2]), native.Uint16(attrs[j].Value[2:4])) + case nl.FRA_UID_RANGE: + rule.UIDRange = NewRuleUIDRange(native.Uint32(attrs[j].Value[0:4]), native.Uint32(attrs[j].Value[4:8])) + case nl.FRA_PROTOCOL: + rule.Protocol = uint8(attrs[j].Value[0]) } } @@ -299,3 +312,10 @@ func (pr *RulePortRange) toRtAttrData() []byte { native.PutUint16(b[1], pr.End) return bytes.Join(b, []byte{}) } + +func (pr *RuleUIDRange) toRtAttrData() []byte { + b := [][]byte{make([]byte, 4), make([]byte, 4)} + native.PutUint32(b[0], pr.Start) + native.PutUint32(b[1], pr.End) + return bytes.Join(b, []byte{}) +} diff --git a/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go b/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go index 358496804..cca526051 100644 --- a/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go +++ b/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go @@ -75,6 +75,7 @@ func (h *Handle) xfrmPolicyAddOrUpdate(policy *XfrmPolicy, nlProto int) error { userTmpl := nl.DeserializeXfrmUserTmpl(tmplData[start : start+nl.SizeofXfrmUserTmpl]) userTmpl.XfrmId.Daddr.FromIP(tmpl.Dst) userTmpl.Saddr.FromIP(tmpl.Src) + userTmpl.Family = uint16(nl.GetIPFamily(tmpl.Dst)) userTmpl.XfrmId.Proto = uint8(tmpl.Proto) userTmpl.XfrmId.Spi = nl.Swap32(uint32(tmpl.Spi)) userTmpl.Mode = uint8(tmpl.Mode) @@ -223,8 
+224,8 @@ func parseXfrmPolicy(m []byte, family int) (*XfrmPolicy, error) { var policy XfrmPolicy - policy.Dst = msg.Sel.Daddr.ToIPNet(msg.Sel.PrefixlenD) - policy.Src = msg.Sel.Saddr.ToIPNet(msg.Sel.PrefixlenS) + policy.Dst = msg.Sel.Daddr.ToIPNet(msg.Sel.PrefixlenD, uint16(family)) + policy.Src = msg.Sel.Saddr.ToIPNet(msg.Sel.PrefixlenS, uint16(family)) policy.Proto = Proto(msg.Sel.Proto) policy.DstPort = int(nl.Swap16(msg.Sel.Dport)) policy.SrcPort = int(nl.Swap16(msg.Sel.Sport)) diff --git a/vendor/github.com/vishvananda/netlink/xfrm_state.go b/vendor/github.com/vishvananda/netlink/xfrm_state.go index 3cd01cbbd..0095832dc 100644 --- a/vendor/github.com/vishvananda/netlink/xfrm_state.go +++ b/vendor/github.com/vishvananda/netlink/xfrm_state.go @@ -117,6 +117,7 @@ type XfrmState struct { DontEncapDSCP bool OSeqMayWrap bool Replay *XfrmReplayState + Selector *XfrmPolicy } func (sa XfrmState) String() string { diff --git a/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go b/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go index 5c14120fd..6b7bb8e72 100644 --- a/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go +++ b/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go @@ -209,7 +209,6 @@ func (h *Handle) xfrmStateAllocSpi(state *XfrmState) (*XfrmState, error) { msg.Min = 0x100 msg.Max = 0xffffffff req.AddData(msg) - if state.Mark != nil { out := nl.NewRtAttr(nl.XFRMA_MARK, writeMark(state.Mark)) req.AddData(out) @@ -337,7 +336,6 @@ var familyError = fmt.Errorf("family error") func xfrmStateFromXfrmUsersaInfo(msg *nl.XfrmUsersaInfo) *XfrmState { var state XfrmState - state.Dst = msg.Id.Daddr.ToIP() state.Src = msg.Saddr.ToIP() state.Proto = Proto(msg.Id.Proto) @@ -347,20 +345,25 @@ func xfrmStateFromXfrmUsersaInfo(msg *nl.XfrmUsersaInfo) *XfrmState { state.ReplayWindow = int(msg.ReplayWindow) lftToLimits(&msg.Lft, &state.Limits) curToStats(&msg.Curlft, &msg.Stats, &state.Statistics) + state.Selector = &XfrmPolicy{ + Dst: msg.Sel.Daddr.ToIPNet(msg.Sel.PrefixlenD, msg.Sel.Family), + Src: msg.Sel.Saddr.ToIPNet(msg.Sel.PrefixlenS, msg.Sel.Family), + Proto: Proto(msg.Sel.Proto), + DstPort: int(nl.Swap16(msg.Sel.Dport)), + SrcPort: int(nl.Swap16(msg.Sel.Sport)), + Ifindex: int(msg.Sel.Ifindex), + } return &state } func parseXfrmState(m []byte, family int) (*XfrmState, error) { msg := nl.DeserializeXfrmUsersaInfo(m) - // This is mainly for the state dump if family != FAMILY_ALL && family != int(msg.Family) { return nil, familyError } - state := xfrmStateFromXfrmUsersaInfo(msg) - attrs, err := nl.ParseRouteAttr(m[nl.SizeofXfrmUsersaInfo:]) if err != nil { return nil, err @@ -515,6 +518,9 @@ func xfrmUsersaInfoFromXfrmState(state *XfrmState) *nl.XfrmUsersaInfo { msg.Id.Spi = nl.Swap32(uint32(state.Spi)) msg.Reqid = uint32(state.Reqid) msg.ReplayWindow = uint8(state.ReplayWindow) - + msg.Sel = nl.XfrmSelector{} + if state.Selector != nil { + selFromPolicy(&msg.Sel, state.Selector) + } return msg } diff --git a/vendor/github.com/yusufpapurcu/wmi/README.md b/vendor/github.com/yusufpapurcu/wmi/README.md index c4a432d6d..426d1a46b 100644 --- a/vendor/github.com/yusufpapurcu/wmi/README.md +++ b/vendor/github.com/yusufpapurcu/wmi/README.md @@ -4,10 +4,3 @@ wmi Package wmi provides a WQL interface to Windows WMI. Note: It interfaces with WMI on the local machine, therefore it only runs on Windows. - ---- - -NOTE: This project is no longer being actively maintained. If you would like -to become its new owner, please contact tlimoncelli at stack over flow dot com. 
- ---- diff --git a/vendor/github.com/yusufpapurcu/wmi/swbemservices.go b/vendor/github.com/yusufpapurcu/wmi/swbemservices.go index 3ff875630..a250c846d 100644 --- a/vendor/github.com/yusufpapurcu/wmi/swbemservices.go +++ b/vendor/github.com/yusufpapurcu/wmi/swbemservices.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package wmi diff --git a/vendor/github.com/yusufpapurcu/wmi/wmi.go b/vendor/github.com/yusufpapurcu/wmi/wmi.go index b4bb4f090..26c3581c9 100644 --- a/vendor/github.com/yusufpapurcu/wmi/wmi.go +++ b/vendor/github.com/yusufpapurcu/wmi/wmi.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows /* @@ -20,7 +21,6 @@ Example code to print names of running processes: println(i, v.Name) } } - */ package wmi @@ -338,11 +338,6 @@ func (c *Client) loadEntity(dst interface{}, src *ole.IDispatch) (errFieldMismat f := v.Field(i) of := f isPtr := f.Kind() == reflect.Ptr - if isPtr { - ptr := reflect.New(f.Type().Elem()) - f.Set(ptr) - f = f.Elem() - } n := v.Type().Field(i).Name if n[0] < 'A' || n[0] > 'Z' { continue @@ -367,6 +362,12 @@ func (c *Client) loadEntity(dst interface{}, src *ole.IDispatch) (errFieldMismat } defer prop.Clear() + if isPtr && !(c.PtrNil && prop.VT == 0x1) { + ptr := reflect.New(f.Type().Elem()) + f.Set(ptr) + f = f.Elem() + } + if prop.VT == 0x1 { //VT_NULL continue } diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bson.go b/vendor/go.mongodb.org/mongo-driver/bson/bson.go index 95ffc1078..a0d818582 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bson.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bson.go @@ -27,7 +27,7 @@ type Zeroer interface { // // Example usage: // -// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} +// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} type D = primitive.D // E represents a BSON element for a D. It is usually used inside a D. @@ -39,12 +39,12 @@ type E = primitive.E // // Example usage: // -// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159} +// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159} type M = primitive.M // An A is an ordered representation of a BSON array. // // Example usage: // -// bson.A{"bar", "world", 3.14159, bson.D{{"qux", 12345}}} +// bson.A{"bar", "world", 3.14159, bson.D{{"qux", 12345}}} type A = primitive.A diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go index b0ae0e23f..5f903ebea 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go @@ -17,7 +17,7 @@ // 2) A Registry that holds these ValueEncoders and ValueDecoders and provides methods for // retrieving them. // -// ValueEncoders and ValueDecoders +// # ValueEncoders and ValueDecoders // // The ValueEncoder interface is implemented by types that can encode a provided Go type to BSON. // The value to encode is provided as a reflect.Value and a bsonrw.ValueWriter is used within the @@ -31,7 +31,7 @@ // allow the use of a function with the correct signature as a ValueDecoder. A DecodeContext // instance is provided and serves similar functionality to the EncodeContext. // -// Registry and RegistryBuilder +// # Registry and RegistryBuilder // // A Registry is an immutable store for ValueEncoders, ValueDecoders, and a type map. See the Registry type // documentation for examples of registering various custom encoders and decoders. 
A Registry can be constructed using a @@ -53,15 +53,15 @@ // values decode as Go int32 and int64 instances, respectively, when decoding into a bson.D. The following code would // change the behavior so these values decode as Go int instances instead: // -// intType := reflect.TypeOf(int(0)) -// registryBuilder.RegisterTypeMapEntry(bsontype.Int32, intType).RegisterTypeMapEntry(bsontype.Int64, intType) +// intType := reflect.TypeOf(int(0)) +// registryBuilder.RegisterTypeMapEntry(bsontype.Int32, intType).RegisterTypeMapEntry(bsontype.Int64, intType) // // 4. Kind encoder/decoders - These can be registered using the RegisterDefaultEncoder and RegisterDefaultDecoder // methods. The registered codec will be invoked when encoding or decoding values whose reflect.Kind matches the // registered reflect.Kind as long as the value's type doesn't match a registered type or hook encoder/decoder first. // These methods should be used to change the behavior for all values for a specific kind. // -// Registry Lookup Procedure +// # Registry Lookup Procedure // // When looking up an encoder in a Registry, the precedence rules are as follows: // @@ -79,7 +79,7 @@ // rules apply for decoders, with the exception that an error of type ErrNoDecoder will be returned if no decoder is // found. // -// DefaultValueEncoders and DefaultValueDecoders +// # DefaultValueEncoders and DefaultValueDecoders // // The DefaultValueEncoders and DefaultValueDecoders types provide a full set of ValueEncoders and // ValueDecoders for handling a wide range of Go types, including all of the types within the diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go index f6f3800d4..80644023c 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go @@ -254,6 +254,7 @@ func (rb *RegistryBuilder) RegisterDefaultDecoder(kind reflect.Kind, dec ValueDe // By default, BSON documents will decode into interface{} values as bson.D. To change the default type for BSON // documents, a type map entry for bsontype.EmbeddedDocument should be registered. For example, to force BSON documents // to decode to bson.Raw, use the following code: +// // rb.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(bson.Raw{})) func (rb *RegistryBuilder) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) *RegistryBuilder { rb.typeMap[bt] = rt diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go index 6f406c162..62708c5c7 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go @@ -34,21 +34,21 @@ func (stpf StructTagParserFunc) ParseStructTags(sf reflect.StructField) (StructT // // The properties are defined below: // -// OmitEmpty Only include the field if it's not set to the zero value for the type or to -// empty slices or maps. +// OmitEmpty Only include the field if it's not set to the zero value for the type or to +// empty slices or maps. // -// MinSize Marshal an integer of a type larger than 32 bits value as an int32, if that's -// feasible while preserving the numeric value. +// MinSize Marshal an integer of a type larger than 32 bits value as an int32, if that's +// feasible while preserving the numeric value. 
// -// Truncate When unmarshaling a BSON double, it is permitted to lose precision to fit within -// a float32. +// Truncate When unmarshaling a BSON double, it is permitted to lose precision to fit within +// a float32. // -// Inline Inline the field, which must be a struct or a map, causing all of its fields -// or keys to be processed as if they were part of the outer struct. For maps, -// keys must not conflict with the bson keys of other struct fields. +// Inline Inline the field, which must be a struct or a map, causing all of its fields +// or keys to be processed as if they were part of the outer struct. For maps, +// keys must not conflict with the bson keys of other struct fields. // -// Skip This struct field should be skipped. This is usually denoted by parsing a "-" -// for the name. +// Skip This struct field should be skipped. This is usually denoted by parsing a "-" +// for the name. // // TODO(skriptble): Add tags for undefined as nil and for null as nil. type StructTags struct { @@ -67,20 +67,20 @@ type StructTags struct { // If there is no name in the struct tag fields, the struct field name is lowercased. // The tag formats accepted are: // -// "[][,[,]]" +// "[][,[,]]" // -// `(...) bson:"[][,[,]]" (...)` +// `(...) bson:"[][,[,]]" (...)` // // An example: // -// type T struct { -// A bool -// B int "myb" -// C string "myc,omitempty" -// D string `bson:",omitempty" json:"jsonkey"` -// E int64 ",minsize" -// F int64 "myf,omitempty,minsize" -// } +// type T struct { +// A bool +// B int "myb" +// C string "myc,omitempty" +// D string `bson:",omitempty" json:"jsonkey"` +// E int64 ",minsize" +// F int64 "myf,omitempty,minsize" +// } // // A struct tag either consisting entirely of '-' or with a bson key with a // value consisting entirely of '-' will return a StructTags with Skip true and diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go new file mode 100644 index 000000000..c40973c8d --- /dev/null +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/doc.go @@ -0,0 +1,8 @@ +// Copyright (C) MongoDB, Inc. 2022-present. +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +// Package bsonoptions defines the optional configurations for the BSON codecs. +package bsonoptions diff --git a/vendor/go.mongodb.org/mongo-driver/bson/doc.go b/vendor/go.mongodb.org/mongo-driver/bson/doc.go index 5e3825a23..0134006d8 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/doc.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/doc.go @@ -9,21 +9,22 @@ // The BSON library handles marshalling and unmarshalling of values through a configurable codec system. For a description // of the codec system and examples of registering custom codecs, see the bsoncodec package. // -// Raw BSON +// # Raw BSON // // The Raw family of types is used to validate and retrieve elements from a slice of bytes. This // type is most useful when you want do lookups on BSON bytes without unmarshaling it into another // type. // // Example: -// var raw bson.Raw = ... // bytes from somewhere -// err := raw.Validate() -// if err != nil { return err } -// val := raw.Lookup("foo") -// i32, ok := val.Int32OK() -// // do something with i32... // -// Native Go Types +// var raw bson.Raw = ... 
// bytes from somewhere +// err := raw.Validate() +// if err != nil { return err } +// val := raw.Lookup("foo") +// i32, ok := val.Int32OK() +// // do something with i32... +// +// # Native Go Types // // The D and M types defined in this package can be used to build representations of BSON using native Go types. D is a // slice and M is a map. For more information about the use cases for these types, see the documentation on the type @@ -32,63 +33,64 @@ // Note that a D should not be constructed with duplicate key names, as that can cause undefined server behavior. // // Example: -// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} -// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159} +// +// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} +// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159} // // When decoding BSON to a D or M, the following type mappings apply when unmarshalling: // -// 1. BSON int32 unmarshals to an int32. -// 2. BSON int64 unmarshals to an int64. -// 3. BSON double unmarshals to a float64. -// 4. BSON string unmarshals to a string. -// 5. BSON boolean unmarshals to a bool. -// 6. BSON embedded document unmarshals to the parent type (i.e. D for a D, M for an M). -// 7. BSON array unmarshals to a bson.A. -// 8. BSON ObjectId unmarshals to a primitive.ObjectID. -// 9. BSON datetime unmarshals to a primitive.DateTime. -// 10. BSON binary unmarshals to a primitive.Binary. -// 11. BSON regular expression unmarshals to a primitive.Regex. -// 12. BSON JavaScript unmarshals to a primitive.JavaScript. -// 13. BSON code with scope unmarshals to a primitive.CodeWithScope. -// 14. BSON timestamp unmarshals to an primitive.Timestamp. -// 15. BSON 128-bit decimal unmarshals to an primitive.Decimal128. -// 16. BSON min key unmarshals to an primitive.MinKey. -// 17. BSON max key unmarshals to an primitive.MaxKey. -// 18. BSON undefined unmarshals to a primitive.Undefined. -// 19. BSON null unmarshals to nil. -// 20. BSON DBPointer unmarshals to a primitive.DBPointer. -// 21. BSON symbol unmarshals to a primitive.Symbol. +// 1. BSON int32 unmarshals to an int32. +// 2. BSON int64 unmarshals to an int64. +// 3. BSON double unmarshals to a float64. +// 4. BSON string unmarshals to a string. +// 5. BSON boolean unmarshals to a bool. +// 6. BSON embedded document unmarshals to the parent type (i.e. D for a D, M for an M). +// 7. BSON array unmarshals to a bson.A. +// 8. BSON ObjectId unmarshals to a primitive.ObjectID. +// 9. BSON datetime unmarshals to a primitive.DateTime. +// 10. BSON binary unmarshals to a primitive.Binary. +// 11. BSON regular expression unmarshals to a primitive.Regex. +// 12. BSON JavaScript unmarshals to a primitive.JavaScript. +// 13. BSON code with scope unmarshals to a primitive.CodeWithScope. +// 14. BSON timestamp unmarshals to an primitive.Timestamp. +// 15. BSON 128-bit decimal unmarshals to an primitive.Decimal128. +// 16. BSON min key unmarshals to an primitive.MinKey. +// 17. BSON max key unmarshals to an primitive.MaxKey. +// 18. BSON undefined unmarshals to a primitive.Undefined. +// 19. BSON null unmarshals to nil. +// 20. BSON DBPointer unmarshals to a primitive.DBPointer. +// 21. BSON symbol unmarshals to a primitive.Symbol. // // The above mappings also apply when marshalling a D or M to BSON. Some other useful marshalling mappings are: // -// 1. time.Time marshals to a BSON datetime. -// 2. int8, int16, and int32 marshal to a BSON int32. -// 3. 
int marshals to a BSON int32 if the value is between math.MinInt32 and math.MaxInt32, inclusive, and a BSON int64 -// otherwise. -// 4. int64 marshals to BSON int64. -// 5. uint8 and uint16 marshal to a BSON int32. -// 6. uint, uint32, and uint64 marshal to a BSON int32 if the value is between math.MinInt32 and math.MaxInt32, -// inclusive, and BSON int64 otherwise. -// 7. BSON null and undefined values will unmarshal into the zero value of a field (e.g. unmarshalling a BSON null or -// undefined value into a string will yield the empty string.). +// 1. time.Time marshals to a BSON datetime. +// 2. int8, int16, and int32 marshal to a BSON int32. +// 3. int marshals to a BSON int32 if the value is between math.MinInt32 and math.MaxInt32, inclusive, and a BSON int64 +// otherwise. +// 4. int64 marshals to BSON int64. +// 5. uint8 and uint16 marshal to a BSON int32. +// 6. uint, uint32, and uint64 marshal to a BSON int32 if the value is between math.MinInt32 and math.MaxInt32, +// inclusive, and BSON int64 otherwise. +// 7. BSON null and undefined values will unmarshal into the zero value of a field (e.g. unmarshalling a BSON null or +// undefined value into a string will yield the empty string.). // -// Structs +// # Structs // // Structs can be marshalled/unmarshalled to/from BSON or Extended JSON. When transforming structs to/from BSON or Extended // JSON, the following rules apply: // -// 1. Only exported fields in structs will be marshalled or unmarshalled. +// 1. Only exported fields in structs will be marshalled or unmarshalled. // -// 2. When marshalling a struct, each field will be lowercased to generate the key for the corresponding BSON element. +// 2. When marshalling a struct, each field will be lowercased to generate the key for the corresponding BSON element. // For example, a struct field named "Foo" will generate key "foo". This can be overridden via a struct tag (e.g. // `bson:"fooField"` to generate key "fooField" instead). // -// 3. An embedded struct field is marshalled as a subdocument. The key will be the lowercased name of the field's type. +// 3. An embedded struct field is marshalled as a subdocument. The key will be the lowercased name of the field's type. // -// 4. A pointer field is marshalled as the underlying type if the pointer is non-nil. If the pointer is nil, it is +// 4. A pointer field is marshalled as the underlying type if the pointer is non-nil. If the pointer is nil, it is // marshalled as a BSON null value. // -// 5. When unmarshalling, a field of type interface{} will follow the D/M type mappings listed above. BSON documents +// 5. When unmarshalling, a field of type interface{} will follow the D/M type mappings listed above. BSON documents // unmarshalled into an interface{} field will be unmarshalled as a D. // // The encoding of each struct field can be customized by the "bson" struct tag. @@ -98,13 +100,14 @@ // are not honored, but that can be enabled by creating a StructCodec with JSONFallbackStructTagParser, like below: // // Example: -// structcodec, _ := bsoncodec.NewStructCodec(bsoncodec.JSONFallbackStructTagParser) +// +// structcodec, _ := bsoncodec.NewStructCodec(bsoncodec.JSONFallbackStructTagParser) // // The bson tag gives the name of the field, possibly followed by a comma-separated list of options. // The name may be empty in order to specify options without overriding the default field name. The following options can be used // to configure behavior: // -// 1. 
omitempty: If the omitempty struct tag is specified on a field, the field will not be marshalled if it is set to +// 1. omitempty: If the omitempty struct tag is specified on a field, the field will not be marshalled if it is set to // the zero value. Fields with language primitive types such as integers, booleans, and strings are considered empty if // their value is equal to the zero value for the type (i.e. 0 for integers, false for booleans, and "" for strings). // Slices, maps, and arrays are considered empty if they are of length zero. Interfaces and pointers are considered @@ -113,16 +116,16 @@ // never considered empty and will be marshalled as embedded documents. // NOTE: It is recommended that this tag be used for all slice and map fields. // -// 2. minsize: If the minsize struct tag is specified on a field of type int64, uint, uint32, or uint64 and the value of +// 2. minsize: If the minsize struct tag is specified on a field of type int64, uint, uint32, or uint64 and the value of // the field can fit in a signed int32, the field will be serialized as a BSON int32 rather than a BSON int64. For other // types, this tag is ignored. // -// 3. truncate: If the truncate struct tag is specified on a field with a non-float numeric type, BSON doubles unmarshalled +// 3. truncate: If the truncate struct tag is specified on a field with a non-float numeric type, BSON doubles unmarshalled // into that field will be truncated at the decimal point. For example, if 3.14 is unmarshalled into a field of type int, // it will be unmarshalled as 3. If this tag is not specified, the decoder will throw an error if the value cannot be // decoded without losing precision. For float64 or non-numeric types, this tag is ignored. // -// 4. inline: If the inline struct tag is specified for a struct or map field, the field will be "flattened" when +// 4. inline: If the inline struct tag is specified for a struct or map field, the field will be "flattened" when // marshalling and "un-flattened" when unmarshalling. This means that all of the fields in that struct/map will be // pulled up one level and will become top-level fields rather than being fields in a nested document. For example, if a // map field named "Map" with value map[string]interface{}{"foo": "bar"} is inlined, the resulting document will be @@ -132,7 +135,7 @@ // This tag can be used with fields that are pointers to structs. If an inlined pointer field is nil, it will not be // marshalled. For fields that are not maps or structs, this tag is ignored. // -// Marshalling and Unmarshalling +// # Marshalling and Unmarshalling // // Manually marshalling and unmarshalling can be done with the Marshal and Unmarshal family of functions. 
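The reflowed bson package documentation above describes the struct tag options (omitempty, minsize, truncate, inline). A small, hypothetical round-trip sketch using those tags; the Event type and its field names are invented for illustration:

package main

import (
	"fmt"
	"log"

	"go.mongodb.org/mongo-driver/bson"
)

// Event is an invented type demonstrating the struct tags described above.
type Event struct {
	Name  string                 `bson:"name"`
	Count int64                  `bson:"count,minsize"`  // stored as int32 when the value fits
	Note  *string                `bson:"note,omitempty"` // omitted when nil
	Extra map[string]interface{} `bson:",inline"`        // keys flattened into the document
}

func main() {
	raw, err := bson.Marshal(Event{Name: "example", Count: 3, Extra: map[string]interface{}{"k": "v"}})
	if err != nil {
		log.Fatal(err)
	}
	var out bson.M
	if err := bson.Unmarshal(raw, &out); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out)
}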
package bson diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go index ffe4eed07..ba7c9112e 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go @@ -191,10 +191,9 @@ func (d Decimal128) IsNaN() bool { // IsInf returns: // -// +1 d == Infinity -// 0 other case -// -1 d == -Infinity -// +// +1 d == Infinity +// 0 other case +// -1 d == -Infinity func (d Decimal128) IsInf() int { if d.h>>58&(1<<5-1) != 0x1E { return 0 diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go index 652898fea..ded367316 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go @@ -61,7 +61,9 @@ func (id ObjectID) Timestamp() time.Time { // Hex returns the hex encoding of the ObjectID as a string. func (id ObjectID) Hex() string { - return hex.EncodeToString(id[:]) + var buf [24]byte + hex.Encode(buf[:], id[:]) + return string(buf[:]) } func (id ObjectID) String() string { diff --git a/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go b/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go index b3cba1bf9..c72ccc1c4 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go @@ -182,7 +182,7 @@ type MaxKey struct{} // // Example usage: // -// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} +// bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}} type D []E // Map creates a map from the elements of the D. @@ -206,12 +206,12 @@ type E struct { // // Example usage: // -// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159} +// bson.M{"foo": "bar", "hello": "world", "pi": 3.14159} type M map[string]interface{} // An A is an ordered representation of a BSON array. // // Example usage: // -// bson.A{"bar", "world", 3.14159, bson.D{{"qux", 12345}}} +// bson.A{"bar", "world", 3.14159, bson.D{{"qux", 12345}}} type A []interface{} diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document_sequence.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document_sequence.go index 6c858a010..e35bd0cd9 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document_sequence.go +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document_sequence.go @@ -96,8 +96,8 @@ func (ds *DocumentSequence) Empty() bool { } } -//ResetIterator resets the iteration point for the Next method to the beginning of the document -//sequence. +// ResetIterator resets the iteration point for the Next method to the beginning of the document +// sequence. 
func (ds *DocumentSequence) ResetIterator() { if ds == nil { return diff --git a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/value.go b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/value.go index 54aa617cf..789d2b982 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/value.go +++ b/vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/value.go @@ -323,7 +323,7 @@ func (v Value) String() string { if !ok { return "" } - return fmt.Sprintf(`{"$timestamp":{"t":"%s","i":"%s"}}`, strconv.FormatUint(uint64(t), 10), strconv.FormatUint(uint64(i), 10)) + return fmt.Sprintf(`{"$timestamp":{"t":%v,"i":%v}}`, t, i) case bsontype.Int64: i64, ok := v.Int64OK() if !ok { diff --git a/vendor/go.opentelemetry.io/otel/.gitattributes b/vendor/go.opentelemetry.io/otel/.gitattributes deleted file mode 100644 index 314766e91..000000000 --- a/vendor/go.opentelemetry.io/otel/.gitattributes +++ /dev/null @@ -1,3 +0,0 @@ -* text=auto eol=lf -*.{cmd,[cC][mM][dD]} text eol=crlf -*.{bat,[bB][aA][tT]} text eol=crlf diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore deleted file mode 100644 index 0b605b3d6..000000000 --- a/vendor/go.opentelemetry.io/otel/.gitignore +++ /dev/null @@ -1,21 +0,0 @@ -.DS_Store -Thumbs.db - -.tools/ -.idea/ -.vscode/ -*.iml -*.so -coverage.* - -gen/ - -/example/fib/fib -/example/fib/traces.txt -/example/jaeger/jaeger -/example/namedtracer/namedtracer -/example/opencensus/opencensus -/example/passthrough/passthrough -/example/prometheus/prometheus -/example/zipkin/zipkin -/example/otel-collector/otel-collector diff --git a/vendor/go.opentelemetry.io/otel/.gitmodules b/vendor/go.opentelemetry.io/otel/.gitmodules deleted file mode 100644 index 38a1f5698..000000000 --- a/vendor/go.opentelemetry.io/otel/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "opentelemetry-proto"] - path = exporters/otlp/internal/opentelemetry-proto - url = https://github.com/open-telemetry/opentelemetry-proto diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml deleted file mode 100644 index 0f099f575..000000000 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ /dev/null @@ -1,244 +0,0 @@ -# See https://github.com/golangci/golangci-lint#config-file -run: - issues-exit-code: 1 #Default - tests: true #Default - -linters: - # Disable everything by default so upgrades to not include new "default - # enabled" linters. - disable-all: true - # Specifically enable linters we want to use. - enable: - - depguard - - errcheck - - godot - - gofmt - - goimports - - gosimple - - govet - - ineffassign - - misspell - - revive - - staticcheck - - typecheck - - unused - -issues: - # Maximum issues count per one linter. - # Set to 0 to disable. - # Default: 50 - # Setting to unlimited so the linter only is run once to debug all issues. - max-issues-per-linter: 0 - # Maximum count of issues with the same text. - # Set to 0 to disable. - # Default: 3 - # Setting to unlimited so the linter only is run once to debug all issues. - max-same-issues: 0 - # Excluding configuration per-path, per-linter, per-text and per-source. - exclude-rules: - # TODO: Having appropriate comments for exported objects helps development, - # even for objects in internal packages. Appropriate comments for all - # exported objects should be added and this exclusion removed. 
- - path: '.*internal/.*' - text: "exported (method|function|type|const) (.+) should have comment or be unexported" - linters: - - revive - # Yes, they are, but it's okay in a test. - - path: _test\.go - text: "exported func.*returns unexported type.*which can be annoying to use" - linters: - - revive - # Example test functions should be treated like main. - - path: example.*_test\.go - text: "calls to (.+) only in main[(][)] or init[(][)] functions" - linters: - - revive - include: - # revive exported should have comment or be unexported. - - EXC0012 - # revive package comment should be of the form ... - - EXC0013 - -linters-settings: - depguard: - # Check the list against standard lib. - # Default: false - include-go-root: true - # A list of packages for the list type specified. - # Default: [] - packages: - - "crypto/md5" - - "crypto/sha1" - - "crypto/**/pkix" - ignore-file-rules: - - "**/*_test.go" - additional-guards: - # Do not allow testing packages in non-test files. - - list-type: denylist - include-go-root: true - packages: - - testing - - github.com/stretchr/testify - ignore-file-rules: - - "**/*_test.go" - - "**/*test/*.go" - - "**/internal/matchers/*.go" - godot: - exclude: - # Exclude sentence fragments for lists. - - '^[ ]*[-•]' - # Exclude sentences prefixing a list. - - ':$' - goimports: - local-prefixes: go.opentelemetry.io - misspell: - locale: US - ignore-words: - - cancelled - revive: - # Sets the default failure confidence. - # This means that linting errors with less than 0.8 confidence will be ignored. - # Default: 0.8 - confidence: 0.01 - rules: - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#blank-imports - - name: blank-imports - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#bool-literal-in-expr - - name: bool-literal-in-expr - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#constant-logical-expr - - name: constant-logical-expr - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-as-argument - # TODO (#3372) reenable linter when it is compatible. 
https://github.com/golangci/golangci-lint/issues/3280 - - name: context-as-argument - disabled: true - arguments: - allowTypesBefore: "*testing.T" - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-keys-type - - name: context-keys-type - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#deep-exit - - name: deep-exit - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#defer - - name: defer - disabled: false - arguments: - - ["call-chain", "loop"] - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#dot-imports - - name: dot-imports - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#duplicated-imports - - name: duplicated-imports - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#early-return - - name: early-return - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-block - - name: empty-block - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-lines - - name: empty-lines - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-naming - - name: error-naming - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-return - - name: error-return - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-strings - - name: error-strings - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#errorf - - name: errorf - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#exported - - name: exported - disabled: false - arguments: - - "sayRepetitiveInsteadOfStutters" - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#flag-parameter - - name: flag-parameter - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#identical-branches - - name: identical-branches - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#if-return - - name: if-return - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#increment-decrement - - name: increment-decrement - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#indent-error-flow - - name: indent-error-flow - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#import-shadowing - - name: import-shadowing - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#package-comments - - name: package-comments - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range - - name: range - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-in-closure - - name: range-val-in-closure - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-address - - name: range-val-address - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#redefines-builtin-id - - name: redefines-builtin-id - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#string-format - - name: string-format - disabled: false - arguments: - - - panic - - '/^[^\n]*$/' - - must not contain line breaks - # 
https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#struct-tag - - name: struct-tag - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#superfluous-else - - name: superfluous-else - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#time-equal - - name: time-equal - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-naming - - name: var-naming - disabled: false - arguments: - - ["ID"] # AllowList - - ["Otel", "Aws", "Gcp"] # DenyList - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-declaration - - name: var-declaration - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unconditional-recursion - - name: unconditional-recursion - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unexported-return - - name: unexported-return - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unhandled-error - - name: unhandled-error - disabled: false - arguments: - - "fmt.Fprint" - - "fmt.Fprintf" - - "fmt.Fprintln" - - "fmt.Print" - - "fmt.Printf" - - "fmt.Println" - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unnecessary-stmt - - name: unnecessary-stmt - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#useless-break - - name: useless-break - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value - - name: waitgroup-by-value - disabled: false diff --git a/vendor/go.opentelemetry.io/otel/.lycheeignore b/vendor/go.opentelemetry.io/otel/.lycheeignore deleted file mode 100644 index 40d62fa2e..000000000 --- a/vendor/go.opentelemetry.io/otel/.lycheeignore +++ /dev/null @@ -1,6 +0,0 @@ -http://localhost -http://jaeger-collector -https://github.com/open-telemetry/opentelemetry-go/milestone/ -https://github.com/open-telemetry/opentelemetry-go/projects -file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries -file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual diff --git a/vendor/go.opentelemetry.io/otel/.markdownlint.yaml b/vendor/go.opentelemetry.io/otel/.markdownlint.yaml deleted file mode 100644 index 3202496c3..000000000 --- a/vendor/go.opentelemetry.io/otel/.markdownlint.yaml +++ /dev/null @@ -1,29 +0,0 @@ -# Default state for all rules -default: true - -# ul-style -MD004: false - -# hard-tabs -MD010: false - -# line-length -MD013: false - -# no-duplicate-header -MD024: - siblings_only: true - -#single-title -MD025: false - -# ol-prefix -MD029: - style: ordered - -# no-inline-html -MD033: false - -# fenced-code-language -MD040: false - diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md deleted file mode 100644 index 1d9726f60..000000000 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ /dev/null @@ -1,2369 +0,0 @@ -# Changelog - -All notable changes to this project will be documented in this file. - -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). - -This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). - -## [Unreleased] - -## [1.14.0/0.37.0/0.0.4] 2023-02-27 - -This release is the last to support [Go 1.18]. -The next release will require at least [Go 1.19]. - -### Added - -- The `event` type semantic conventions are added to `go.opentelemetry.io/otel/semconv/v1.17.0`. 
(#3697) -- Support [Go 1.20]. (#3693) -- The `go.opentelemetry.io/otel/semconv/v1.18.0` package. - The package contains semantic conventions from the `v1.18.0` version of the OpenTelemetry specification. (#3719) - - The following `const` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included: - - `OtelScopeNameKey` -> `OTelScopeNameKey` - - `OtelScopeVersionKey` -> `OTelScopeVersionKey` - - `OtelLibraryNameKey` -> `OTelLibraryNameKey` - - `OtelLibraryVersionKey` -> `OTelLibraryVersionKey` - - `OtelStatusCodeKey` -> `OTelStatusCodeKey` - - `OtelStatusDescriptionKey` -> `OTelStatusDescriptionKey` - - `OtelStatusCodeOk` -> `OTelStatusCodeOk` - - `OtelStatusCodeError` -> `OTelStatusCodeError` - - The following `func` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included: - - `OtelScopeName` -> `OTelScopeName` - - `OtelScopeVersion` -> `OTelScopeVersion` - - `OtelLibraryName` -> `OTelLibraryName` - - `OtelLibraryVersion` -> `OTelLibraryVersion` - - `OtelStatusDescription` -> `OTelStatusDescription` -- A `IsSampled` method is added to the `SpanContext` implementation in `go.opentelemetry.io/otel/bridge/opentracing` to expose the span sampled state. - See the [README](./bridge/opentracing/README.md) for more information. (#3570) -- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/metric`. (#3738) -- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/trace`. (#3739) -- The following environment variables are supported by the periodic `Reader` in `go.opentelemetry.io/otel/sdk/metric`. (#3763) - - `OTEL_METRIC_EXPORT_INTERVAL` sets the time between collections and exports. - - `OTEL_METRIC_EXPORT_TIMEOUT` sets the timeout an export is attempted. - -### Changed - -- Fall-back to `TextMapCarrier` when it's not `HttpHeader`s in `go.opentelemetry.io/otel/bridge/opentracing`. (#3679) -- The `Collect` method of the `"go.opentelemetry.io/otel/sdk/metric".Reader` interface is updated to accept the `metricdata.ResourceMetrics` value the collection will be made into. - This change is made to enable memory reuse by SDK users. (#3732) -- The `WithUnit` option in `go.opentelemetry.io/otel/sdk/metric/instrument` is updated to accept a `string` for the unit value. (#3776) - -### Fixed - -- Ensure `go.opentelemetry.io/otel` does not use generics. (#3723, #3725) -- Multi-reader `MeterProvider`s now export metrics for all readers, instead of just the first reader. (#3720, #3724) -- Remove use of deprecated `"math/rand".Seed` in `go.opentelemetry.io/otel/example/prometheus`. (#3733) -- Do not silently drop unknown schema data with `Parse` in `go.opentelemetry.io/otel/schema/v1.1`. (#3743) -- Data race issue in OTLP exporter retry mechanism. (#3755, #3756) -- Wrapping empty errors when exporting in `go.opentelemetry.io/otel/sdk/metric`. (#3698, #3772) -- Incorrect "all" and "resource" definition for schema files in `go.opentelemetry.io/otel/schema/v1.1`. (#3777) - -### Deprecated - -- The `go.opentelemetry.io/otel/metric/unit` package is deprecated. - Use the equivalent unit string instead. (#3776) - - Use `"1"` instead of `unit.Dimensionless` - - Use `"By"` instead of `unit.Bytes` - - Use `"ms"` instead of `unit.Milliseconds` - -## [1.13.0/0.36.0] 2023-02-07 - -### Added - -- Attribute `KeyValue` creations functions to `go.opentelemetry.io/otel/semconv/v1.17.0` for all non-enum semantic conventions. - These functions ensure semantic convention type correctness. 
(#3675) - -### Fixed - -- Removed the `http.target` attribute from being added by `ServerRequest` in the following packages. (#3687) - - `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv` - - `go.opentelemetry.io/otel/semconv/v1.14.0/httpconv` - - `go.opentelemetry.io/otel/semconv/v1.15.0/httpconv` - - `go.opentelemetry.io/otel/semconv/v1.16.0/httpconv` - - `go.opentelemetry.io/otel/semconv/v1.17.0/httpconv` - -### Removed - -- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is removed. (#3631) -- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is removed. (#3631) -- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is removed. (#3631) -- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncint64` package is removed. (#3631) - -## [1.12.0/0.35.0] 2023-01-28 - -### Added - -- The `WithInt64Callback` option to `go.opentelemetry.io/otel/metric/instrument`. - This options is used to configure `int64` Observer callbacks during their creation. (#3507) -- The `WithFloat64Callback` option to `go.opentelemetry.io/otel/metric/instrument`. - This options is used to configure `float64` Observer callbacks during their creation. (#3507) -- The `Producer` interface and `Reader.RegisterProducer(Producer)` to `go.opentelemetry.io/otel/sdk/metric`. - These additions are used to enable external metric Producers. (#3524) -- The `Callback` function type to `go.opentelemetry.io/otel/metric`. - This new named function type is registered with a `Meter`. (#3564) -- The `go.opentelemetry.io/otel/semconv/v1.13.0` package. - The package contains semantic conventions from the `v1.13.0` version of the OpenTelemetry specification. (#3499) - - The `EndUserAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientRequest` and `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. - - The `HTTPAttributesFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientResponse` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. - - The `HTTPClientAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. - - The `HTTPServerAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. - - The `HTTPServerMetricAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. - - The `NetAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `Transport` in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` and `ClientRequest` or `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. - - The `SpanStatusFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. - - The `SpanStatusFromHTTPStatusCodeAndSpanKind` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `ClientStatus` and `ServerStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. - - The `Client` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Conn`. 
- - The `Server` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Listener`. -- The `go.opentelemetry.io/otel/semconv/v1.14.0` package. - The package contains semantic conventions from the `v1.14.0` version of the OpenTelemetry specification. (#3566) -- The `go.opentelemetry.io/otel/semconv/v1.15.0` package. - The package contains semantic conventions from the `v1.15.0` version of the OpenTelemetry specification. (#3578) -- The `go.opentelemetry.io/otel/semconv/v1.16.0` package. - The package contains semantic conventions from the `v1.16.0` version of the OpenTelemetry specification. (#3579) -- Metric instruments to `go.opentelemetry.io/otel/metric/instrument`. - These instruments are use as replacements of the depreacted `go.opentelemetry.io/otel/metric/instrument/{asyncfloat64,asyncint64,syncfloat64,syncint64}` packages.(#3575, #3586) - - `Float64ObservableCounter` replaces the `asyncfloat64.Counter` - - `Float64ObservableUpDownCounter` replaces the `asyncfloat64.UpDownCounter` - - `Float64ObservableGauge` replaces the `asyncfloat64.Gauge` - - `Int64ObservableCounter` replaces the `asyncint64.Counter` - - `Int64ObservableUpDownCounter` replaces the `asyncint64.UpDownCounter` - - `Int64ObservableGauge` replaces the `asyncint64.Gauge` - - `Float64Counter` replaces the `syncfloat64.Counter` - - `Float64UpDownCounter` replaces the `syncfloat64.UpDownCounter` - - `Float64Histogram` replaces the `syncfloat64.Histogram` - - `Int64Counter` replaces the `syncint64.Counter` - - `Int64UpDownCounter` replaces the `syncint64.UpDownCounter` - - `Int64Histogram` replaces the `syncint64.Histogram` -- `NewTracerProvider` to `go.opentelemetry.io/otel/bridge/opentracing`. - This is used to create `WrapperTracer` instances from a `TracerProvider`. (#3116) -- The `Extrema` type to `go.opentelemetry.io/otel/sdk/metric/metricdata`. - This type is used to represent min/max values and still be able to distinguish unset and zero values. (#3487) -- The `go.opentelemetry.io/otel/semconv/v1.17.0` package. - The package contains semantic conventions from the `v1.17.0` version of the OpenTelemetry specification. (#3599) - -### Changed - -- Jaeger and Zipkin exporter use `github.com/go-logr/logr` as the logging interface, and add the `WithLogr` option. (#3497, #3500) -- Instrument configuration in `go.opentelemetry.io/otel/metric/instrument` is split into specific options and confguration based on the instrument type. (#3507) - - Use the added `Int64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncint64`. - - Use the added `Float64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncfloat64`. - - Use the added `Int64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncint64`. - - Use the added `Float64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncfloat64`. -- Return a `Registration` from the `RegisterCallback` method of a `Meter` in the `go.opentelemetry.io/otel/metric` package. - This `Registration` can be used to unregister callbacks. (#3522) -- Global error handler uses an atomic value instead of a mutex. (#3543) -- Add `NewMetricProducer` to `go.opentelemetry.io/otel/bridge/opencensus`, which can be used to pass OpenCensus metrics to an OpenTelemetry Reader. (#3541) -- Global logger uses an atomic value instead of a mutex. 
(#3545) -- The `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` releases all computational resources when called the first time. (#3551) -- The `Sampler` returned from `TraceIDRatioBased` `go.opentelemetry.io/otel/sdk/trace` now uses the rightmost bits for sampling decisions. - This fixes random sampling when using ID generators like `xray.IDGenerator` and increasing parity with other language implementations. (#3557) -- Errors from `go.opentelemetry.io/otel/exporters/otlp/otlptrace` exporters are wrapped in erros identifying their signal name. - Existing users of the exporters attempting to identify specific errors will need to use `errors.Unwrap()` to get the underlying error. (#3516) -- Exporters from `go.opentelemetry.io/otel/exporters/otlp` will print the final retryable error message when attempts to retry time out. (#3514) -- The instrument kind names in `go.opentelemetry.io/otel/sdk/metric` are updated to match the API. (#3562) - - `InstrumentKindSyncCounter` is renamed to `InstrumentKindCounter` - - `InstrumentKindSyncUpDownCounter` is renamed to `InstrumentKindUpDownCounter` - - `InstrumentKindSyncHistogram` is renamed to `InstrumentKindHistogram` - - `InstrumentKindAsyncCounter` is renamed to `InstrumentKindObservableCounter` - - `InstrumentKindAsyncUpDownCounter` is renamed to `InstrumentKindObservableUpDownCounter` - - `InstrumentKindAsyncGauge` is renamed to `InstrumentKindObservableGauge` -- The `RegisterCallback` method of the `Meter` in `go.opentelemetry.io/otel/metric` changed. - - The named `Callback` replaces the inline function parameter. (#3564) - - `Callback` is required to return an error. (#3576) - - `Callback` accepts the added `Observer` parameter added. - This new parameter is used by `Callback` implementations to observe values for asynchronous instruments instead of calling the `Observe` method of the instrument directly. (#3584) - - The slice of `instrument.Asynchronous` is now passed as a variadic argument. (#3587) -- The exporter from `go.opentelemetry.io/otel/exporters/zipkin` is updated to use the `v1.16.0` version of semantic conventions. - This means it no longer uses the removed `net.peer.ip` or `http.host` attributes to determine the remote endpoint. - Instead it uses the `net.sock.peer` attributes. (#3581) -- The `Min` and `Max` fields of the `HistogramDataPoint` in `go.opentelemetry.io/otel/sdk/metric/metricdata` are now defined with the added `Extrema` type instead of a `*float64`. (#3487) - -### Fixed - -- Asynchronous instruments that use sum aggregators and attribute filters correctly add values from equivalent attribute sets that have been filtered. (#3439, #3549) -- The `RegisterCallback` method of the `Meter` from `go.opentelemetry.io/otel/sdk/metric` only registers a callback for instruments created by that meter. - Trying to register a callback with instruments from a different meter will result in an error being returned. (#3584) - -### Deprecated - -- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` is deprecated. - Use `NewMetricProducer` instead. (#3541) -- The `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is deprecated. - Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) -- The `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is deprecated. - Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) -- The `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is deprecated. 
  Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
- The `go.opentelemetry.io/otel/metric/instrument/syncint64` package is deprecated.
  Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
- The `NewWrappedTracerProvider` in `go.opentelemetry.io/otel/bridge/opentracing` is now deprecated.
  Use `NewTracerProvider` instead. (#3116)

### Removed

- The deprecated `go.opentelemetry.io/otel/sdk/metric/view` package is removed. (#3520)
- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncint64` is removed.
  Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
  - The `Counter` method is replaced by `Meter.Int64ObservableCounter`
  - The `UpDownCounter` method is replaced by `Meter.Int64ObservableUpDownCounter`
  - The `Gauge` method is replaced by `Meter.Int64ObservableGauge`
- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncfloat64` is removed.
  Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
  - The `Counter` method is replaced by `Meter.Float64ObservableCounter`
  - The `UpDownCounter` method is replaced by `Meter.Float64ObservableUpDownCounter`
  - The `Gauge` method is replaced by `Meter.Float64ObservableGauge`
- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncint64` is removed.
  Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
  - The `Counter` method is replaced by `Meter.Int64Counter`
  - The `UpDownCounter` method is replaced by `Meter.Int64UpDownCounter`
  - The `Histogram` method is replaced by `Meter.Int64Histogram`
- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncfloat64` is removed.
  Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead (see the sketch below). (#3530)
  - The `Counter` method is replaced by `Meter.Float64Counter`
  - The `UpDownCounter` method is replaced by `Meter.Float64UpDownCounter`
  - The `Histogram` method is replaced by `Meter.Float64Histogram`
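For illustration, a minimal sketch of the replacement flow described above, written against the metric API as it later stabilized (the exact package layout differed at the time of this release); the meter name, instrument name, and recorded value are placeholders:

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/metric"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	// Readers and exporters are omitted; a real setup would register one.
	provider := sdkmetric.NewMeterProvider()
	defer func() { _ = provider.Shutdown(context.Background()) }()

	meter := provider.Meter("example")

	// Observable instruments are created directly on the Meter, replacing
	// the removed asyncint64/asyncfloat64 InstrumentProvider helpers.
	queueLen, err := meter.Int64ObservableGauge("queue.length")
	if err != nil {
		log.Fatal(err)
	}

	// The callback receives an Observer and reports values through it
	// instead of calling Observe on the instrument directly.
	_, err = meter.RegisterCallback(func(_ context.Context, o metric.Observer) error {
		o.ObserveInt64(queueLen, 42) // placeholder value
		return nil
	}, queueLen)
	if err != nil {
		log.Fatal(err)
	}
}
```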
## [1.11.2/0.34.0] - 2022-12-05

### Added

- The `WithView` `Option` is added to the `go.opentelemetry.io/otel/sdk/metric` package.
  This option is used to configure the view(s) a `MeterProvider` will use for all `Reader`s that are registered with it (see the sketch below). (#3387)
- Add Instrumentation Scope and Version as info metric and label in Prometheus exporter.
  This can be disabled using the `WithoutScopeInfo()` option added to that package. (#3273, #3357)
- OTLP exporters now recognize: (#3363)
  - `OTEL_EXPORTER_OTLP_INSECURE`
  - `OTEL_EXPORTER_OTLP_TRACES_INSECURE`
  - `OTEL_EXPORTER_OTLP_METRICS_INSECURE`
  - `OTEL_EXPORTER_OTLP_CLIENT_KEY`
  - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY`
  - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY`
  - `OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE`
  - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE`
  - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE`
- The `View` type and related `NewView` function to create a view according to the OpenTelemetry specification are added to `go.opentelemetry.io/otel/sdk/metric`.
  These additions are replacements for the `View` type and `New` function from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459)
- The `Instrument` and `InstrumentKind` types are added to `go.opentelemetry.io/otel/sdk/metric`.
  These additions are replacements for the `Instrument` and `InstrumentKind` types from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459)
- The `Stream` type is added to `go.opentelemetry.io/otel/sdk/metric` to define a metric data stream a view will produce. (#3459)
- The `AssertHasAttributes` assertion allows instrument authors to test that datapoints returned have appropriate attributes. (#3487)

### Changed

- The `"go.opentelemetry.io/otel/sdk/metric".WithReader` option no longer accepts views to associate with the `Reader`.
  Instead, views are now registered directly with the `MeterProvider` via the new `WithView` option.
  The views registered with the `MeterProvider` apply to all `Reader`s. (#3387)
- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/sdk/metric".Exporter` interface. (#3260)
- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric".Client` interface. (#3260)
- The `WithTemporalitySelector` and `WithAggregationSelector` `ReaderOption`s have been changed to `ManualReaderOption`s in the `go.opentelemetry.io/otel/sdk/metric` package. (#3260)
- The periodic reader in the `go.opentelemetry.io/otel/sdk/metric` package now uses the temporality and aggregation selectors from its configured exporter instead of accepting them as options. (#3260)

### Fixed

- The `go.opentelemetry.io/otel/exporters/prometheus` exporter fixes duplicated `_total` suffixes. (#3369)
- Remove comparable requirement for `Reader`s. (#3387)
- Cumulative metrics from the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) are defined as monotonic sums, instead of non-monotonic. (#3389)
- Asynchronous counters (`Counter` and `UpDownCounter`) from the metric SDK now produce delta sums when configured with delta temporality. (#3398)
- Exported `Status` codes in the `go.opentelemetry.io/otel/exporters/zipkin` exporter are now exported as all upper case values. (#3340)
- `Aggregation`s from `go.opentelemetry.io/otel/sdk/metric` with no data are not exported. (#3394, #3436)
- Reenabled Attribute Filters in the Metric SDK. (#3396)
- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggregation. (#3408)
- Do not report empty partial-success responses in the `go.opentelemetry.io/otel/exporters/otlp` exporters. (#3438, #3432)
- Handle partial success responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3162, #3440)
- Prevent duplicate Prometheus description, unit, and type. (#3469)
- Prevent panic when using incorrect `attribute.Value.As[Type]Slice()`. (#3489)

### Removed

- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.Client` interface is removed. (#3486)
- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.New` function is removed. Use `otlpmetric[http|grpc].New` directly. (#3486)

### Deprecated

- The `go.opentelemetry.io/otel/sdk/metric/view` package is deprecated.
  Use `Instrument`, `InstrumentKind`, `View`, and `NewView` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3476)
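As an illustration of the view registration described above, a minimal sketch assuming the `go.opentelemetry.io/otel/sdk/metric` package as it ships today; the instrument and stream names are placeholders:

```go
package main

import (
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func newProviderWithView(reader sdkmetric.Reader) *sdkmetric.MeterProvider {
	// Rename the "http.server.duration" stream to "http.server.latency".
	view := sdkmetric.NewView(
		sdkmetric.Instrument{Name: "http.server.duration"},
		sdkmetric.Stream{Name: "http.server.latency"},
	)

	// Views are registered on the MeterProvider and apply to every Reader.
	return sdkmetric.NewMeterProvider(
		sdkmetric.WithReader(reader),
		sdkmetric.WithView(view),
	)
}
```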
## [1.11.1/0.33.0] - 2022-10-19

### Added

- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` registers with a Prometheus registerer on creation.
  By default, it will register with the default Prometheus registerer.
  A non-default registerer can be used by passing the `WithRegisterer` option. (#3239)
- Added the `WithAggregationSelector` option to the `go.opentelemetry.io/otel/exporters/prometheus` package to change the default `AggregationSelector` used. (#3341)
- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` converts the `Resource` associated with metric exports into a `target_info` metric. (#3285)

### Changed

- The `"go.opentelemetry.io/otel/exporters/prometheus".New` function is updated to return an error.
  It will return an error if the exporter fails to register with Prometheus. (#3239)

### Fixed

- The URL-encoded values from the `OTEL_RESOURCE_ATTRIBUTES` environment variable are decoded. (#2963)
- The `baggage.NewMember` function decodes the `value` parameter instead of directly using it.
  This fixes the implementation to be compliant with the W3C specification. (#3226)
- Slice attributes of the `attribute` package are now comparable based on their value, not instance. (#3108, #3252)
- The `Shutdown` and `ForceFlush` methods of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` no longer return an error when no processor is registered. (#3268)
- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` cumulatively sums histogram buckets. (#3281)
- The sum of each histogram data point is now uniquely exported by the `go.opentelemetry.io/otel/exporters/otlpmetric` exporters. (#3284, #3293)
- Recorded values for asynchronous counters (`Counter` and `UpDownCounter`) are interpreted as exact, not incremental, sum values by the metric SDK. (#3350, #3278)
- `UpDownCounters` are now correctly output as Prometheus gauges in the `go.opentelemetry.io/otel/exporters/prometheus` exporter. (#3358)
- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` no longer describes the metrics it will send to Prometheus on startup.
  Instead, the exporter is defined as an "unchecked" collector for Prometheus.
  This fixes the `reader is not registered` warning currently emitted on startup. (#3291, #3342)
- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now correctly adds `_total` suffixes to counter metrics. (#3360)
- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now adds a unit suffix to metric names.
  This can be disabled using the `WithoutUnits()` option added to that package. (#3352)

## [1.11.0/0.32.3] - 2022-10-12

### Added

- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlptrace/otltracegrpc` and `go.opentelemetry.io/otel/exporters/otlptrace/otlptracehttp`). (#3261)

### Changed

- `span.SetStatus` has been updated such that calls that lower the status are now no-ops. (#3214)
- Upgrade `golang.org/x/sys/unix` from `v0.0.0-20210423185535-09eb48e85fd7` to `v0.0.0-20220919091848-fb04ddd9f9c8`.
  This addresses [GO-2022-0493](https://pkg.go.dev/vuln/GO-2022-0493). (#3235)

## [0.32.2] Metric SDK (Alpha) - 2022-10-11

### Added

- Added an example of using metric views to customize instruments. (#3177)
- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetrichttp`). (#3261)
### Changed

- Flush pending measurements with the `PeriodicReader` in the `go.opentelemetry.io/otel/sdk/metric` package when `ForceFlush` or `Shutdown` are called. (#3220)
- Update histogram default bounds to match the requirements of the latest specification. (#3222)
- Encode the HTTP status code in the OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`) as an integer. (#3265)

### Fixed

- Use default view if instrument does not match any registered view of a reader. (#3224, #3237)
- Return the same instrument every time a user makes the exact same instrument creation call. (#3229, #3251)
- Return the existing instrument when a view transforms a creation call to match an existing instrument. (#3240, #3251)
- Log a warning when a conflicting instrument (e.g. description, unit, data-type) is created instead of returning an error. (#3251)
- The OpenCensus bridge no longer sends empty batches of metrics. (#3263)

## [0.32.1] Metric SDK (Alpha) - 2022-09-22

### Changed

- The Prometheus exporter sanitizes OpenTelemetry instrument names when exporting.
  Invalid characters are replaced with `_`. (#3212)

### Added

- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been reintroduced. (#3192)
- The OpenCensus bridge example (`go.opentelemetry.io/otel/example/opencensus`) has been reintroduced. (#3206)

### Fixed

- Updated go.mods to point to valid versions of the SDK. (#3216)
- Set the `MeterProvider` resource on all exported metric data. (#3218)

## [0.32.0] Revised Metric SDK (Alpha) - 2022-09-18

### Changed

- The metric SDK in `go.opentelemetry.io/otel/sdk/metric` is completely refactored to comply with the OpenTelemetry specification.
  Please see the package documentation for how the new SDK is initialized and configured. (#3175)
- Update the minimum supported Go version to go1.18. Removes support for go1.17. (#3179)

### Removed

- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been removed.
  A new bridge compliant with the revised metric SDK will be added back in a future release. (#3175)
- The `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest` package is removed, see the new metric SDK. (#3175)
- The `go.opentelemetry.io/otel/sdk/metric/aggregator/histogram` package is removed, see the new metric SDK. (#3175)
- The `go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue` package is removed, see the new metric SDK. (#3175)
- The `go.opentelemetry.io/otel/sdk/metric/aggregator/sum` package is removed, see the new metric SDK. (#3175)
- The `go.opentelemetry.io/otel/sdk/metric/aggregator` package is removed, see the new metric SDK. (#3175)
- The `go.opentelemetry.io/otel/sdk/metric/controller/basic` package is removed, see the new metric SDK. (#3175)
- The `go.opentelemetry.io/otel/sdk/metric/controller/controllertest` package is removed, see the new metric SDK. (#3175)
- The `go.opentelemetry.io/otel/sdk/metric/controller/time` package is removed, see the new metric SDK. (#3175)
- The `go.opentelemetry.io/otel/sdk/metric/export/aggregation` package is removed, see the new metric SDK. (#3175)
- The `go.opentelemetry.io/otel/sdk/metric/export` package is removed, see the new metric SDK. (#3175)
- The `go.opentelemetry.io/otel/sdk/metric/metrictest` package is removed.
  A replacement package that supports the new metric SDK will be added back in a future release. (#3175)
- The `go.opentelemetry.io/otel/sdk/metric/number` package is removed, see the new metric SDK. (#3175)
- The `go.opentelemetry.io/otel/sdk/metric/processor/basic` package is removed, see the new metric SDK. (#3175)
- The `go.opentelemetry.io/otel/sdk/metric/processor/processortest` package is removed, see the new metric SDK. (#3175)
- The `go.opentelemetry.io/otel/sdk/metric/processor/reducer` package is removed, see the new metric SDK. (#3175)
- The `go.opentelemetry.io/otel/sdk/metric/registry` package is removed, see the new metric SDK. (#3175)
- The `go.opentelemetry.io/otel/sdk/metric/sdkapi` package is removed, see the new metric SDK. (#3175)
- The `go.opentelemetry.io/otel/sdk/metric/selector/simple` package is removed, see the new metric SDK. (#3175)
- The `"go.opentelemetry.io/otel/sdk/metric".ErrUninitializedInstrument` variable was removed. (#3175)
- The `"go.opentelemetry.io/otel/sdk/metric".ErrBadInstrument` variable was removed. (#3175)
- The `"go.opentelemetry.io/otel/sdk/metric".Accumulator` type was removed, see the `MeterProvider` in the new metric SDK. (#3175)
- The `"go.opentelemetry.io/otel/sdk/metric".NewAccumulator` function was removed, see `NewMeterProvider` in the new metric SDK. (#3175)
- The deprecated `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets` function was removed. (#3175)

## [1.10.0] - 2022-09-09

### Added

- Support Go 1.19. (#3077)
  Include compatibility testing and document support. (#3077)
- Support the OTLP ExportTracePartialSuccess response; these are passed to the registered error handler. (#3106)
- Upgrade `go.opentelemetry.io/proto/otlp` from v0.18.0 to v0.19.0. (#3107)

### Changed

- Fix misidentification of OpenTelemetry `SpanKind` in OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`). (#3096)
- Attempting to start a span with a nil `context` will no longer cause a panic. (#3110)
- All exporters will be shut down even if one reports an error. (#3091)
- Ensure valid UTF-8 when truncating over-length attribute values. (#3156)

## [1.9.0/0.0.3] - 2022-08-01

### Added

- Add support for Schema Files format 1.1.x (metric "split" transform) with the new `go.opentelemetry.io/otel/schema/v1.1` package. (#2999)
- Add the `go.opentelemetry.io/otel/semconv/v1.11.0` package.
  The package contains semantic conventions from the `v1.11.0` version of the OpenTelemetry specification. (#3009)
- Add the `go.opentelemetry.io/otel/semconv/v1.12.0` package.
  The package contains semantic conventions from the `v1.12.0` version of the OpenTelemetry specification. (#3010)
- Add the `http.method` attribute to HTTP server metric from all `go.opentelemetry.io/otel/semconv/*` packages. (#3018)

### Fixed

- Invalid warning for context setup being deferred in `go.opentelemetry.io/otel/bridge/opentracing` package. (#3029)

## [1.8.0/0.31.0] - 2022-07-08

### Added

- Add support for `opentracing.TextMap` format in the `Inject` and `Extract` methods of the `"go.opentelemetry.io/otel/bridge/opentracing".BridgeTracer` type. (#2911)

### Changed

- The `crosslink` make target has been updated to use the `go.opentelemetry.io/build-tools/crosslink` package. (#2886)
- In the `go.opentelemetry.io/otel/sdk/instrumentation` package rename `Library` to `Scope` and alias `Library` as `Scope`. (#2976)
- Move metric no-op implementation from `nonrecording` to `metric` package. (#2866)

### Removed

- Support for go1.16.
  Support is now only for go1.17 and go1.18. (#2917)

### Deprecated

- The `Library` struct in the `go.opentelemetry.io/otel/sdk/instrumentation` package is deprecated.
  Use the equivalent `Scope` struct instead. (#2977)
- The `ReadOnlySpan.InstrumentationLibrary` method from the `go.opentelemetry.io/otel/sdk/trace` package is deprecated.
  Use the equivalent `ReadOnlySpan.InstrumentationScope` method instead. (#2977)

## [1.7.0/0.30.0] - 2022-04-28

### Added

- Add the `go.opentelemetry.io/otel/semconv/v1.8.0` package.
  The package contains semantic conventions from the `v1.8.0` version of the OpenTelemetry specification. (#2763)
- Add the `go.opentelemetry.io/otel/semconv/v1.9.0` package.
  The package contains semantic conventions from the `v1.9.0` version of the OpenTelemetry specification. (#2792)
- Add the `go.opentelemetry.io/otel/semconv/v1.10.0` package.
  The package contains semantic conventions from the `v1.10.0` version of the OpenTelemetry specification. (#2842)
- Added an in-memory exporter to metrictest to aid testing with a full SDK. (#2776)

### Fixed

- Globally delegated instruments are unwrapped before delegating asynchronous callbacks. (#2784)
- Remove import of `testing` package in non-test builds of the `go.opentelemetry.io/otel` package. (#2786)

### Changed

- The `WithLabelEncoder` option from the `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` package is renamed to `WithAttributeEncoder`. (#2790)
- The `LabelFilterSelector` interface from `go.opentelemetry.io/otel/sdk/metric/processor/reducer` is renamed to `AttributeFilterSelector`.
  The method included in the renamed interface also changed from `LabelFilterFor` to `AttributeFilterFor`. (#2790)
- The `Metadata.Labels` method from the `go.opentelemetry.io/otel/sdk/metric/export` package is renamed to `Metadata.Attributes`.
  Consequently, the `Record` type from the same package also has had the embedded method renamed. (#2790)

### Deprecated

- The `Iterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
  Use the equivalent `Iterator.Attribute` method instead. (#2790)
- The `Iterator.IndexedLabel` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
  Use the equivalent `Iterator.IndexedAttribute` method instead. (#2790)
- The `MergeIterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
  Use the equivalent `MergeIterator.Attribute` method instead. (#2790)

### Removed

- Removed the `Batch` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864)
- Removed the `Measurement` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864)

## [0.29.0] - 2022-04-11

### Added

- The metrics global package was added back into several test files. (#2764)
- The `Meter` function is added back to the `go.opentelemetry.io/otel/metric/global` package.
  This function is a convenience function equivalent to calling `global.MeterProvider().Meter(...)`. (#2750)

### Removed

- Removed the `go.opentelemetry.io/otel/sdk/export/metric` module.
  Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2720)

### Changed

- Don't panic anymore when setting a global MeterProvider to itself. (#2749)
- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` from `v0.12.1` to `v0.15.0`.
  This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibraryMetrics` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeMetrics`. (#2748)

## [1.6.3] - 2022-04-07

### Fixed

- Allow non-comparable global `MeterProvider`, `TracerProvider`, and `TextMapPropagator` types to be set. (#2772, #2773)

## [1.6.2] - 2022-04-06

### Changed

- Don't panic anymore when setting a global TracerProvider or TextMapPropagator to itself. (#2749)
- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace` from `v0.12.1` to `v0.15.0`.
  This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibrarySpans` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeSpans`. (#2748)

## [1.6.1] - 2022-03-28

### Fixed

- The `go.opentelemetry.io/otel/schema/*` packages now use the correct schema URL for their `SchemaURL` constant.
  Instead of using `"https://opentelemetry.io/schemas/v<version>"` they now use the correct URL without a `v` prefix, `"https://opentelemetry.io/schemas/<version>"`. (#2743, #2744)

### Security

- Upgrade `go.opentelemetry.io/proto/otlp` from `v0.12.0` to `v0.12.1`.
  This includes an indirect upgrade of `github.com/grpc-ecosystem/grpc-gateway` which resolves [a vulnerability](https://nvd.nist.gov/vuln/detail/CVE-2019-11254) from `gopkg.in/yaml.v2` in version `v2.2.3`. (#2724, #2728)

## [1.6.0/0.28.0] - 2022-03-23

### ⚠️ Notice ⚠️

This update is a breaking change of the unstable Metrics API.
Code instrumented with the `go.opentelemetry.io/otel/metric` package will need to be modified.

### Added

- Add metrics exponential histogram support.
  New mapping functions have been made available in `sdk/metric/aggregator/exponential/mapping` for other OpenTelemetry projects to take dependencies on. (#2502)
- Add Go 1.18 to our compatibility tests. (#2679)
- Allow configuring the Sampler with the `OTEL_TRACES_SAMPLER` and `OTEL_TRACES_SAMPLER_ARG` environment variables. (#2305, #2517)
- Add the `metric/global` package for obtaining and setting the global `MeterProvider`. (#2660)

### Changed

- The metrics API has been significantly changed to match the revised OpenTelemetry specification.
  High-level changes include:
  - Synchronous and asynchronous instruments are now handled by independent `InstrumentProvider`s.
    These `InstrumentProvider`s are managed with a `Meter`.
  - Synchronous and asynchronous instruments are grouped into their own packages based on value types.
  - Asynchronous callbacks can now be registered with a `Meter`.

  Be sure to check out the metric module documentation for more information on how to use the revised API. (#2587, #2660)

### Fixed

- Fallback to general attribute limits when span-specific ones are not set in the environment. (#2675, #2677)

## [1.5.0] - 2022-03-16

### Added

- Log the Exporters configuration in the TracerProviders message. (#2578)
- Added support to configure the span limits with environment variables.
  The following environment variables are supported. (#2606, #2637)
  - `OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT`
  - `OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT`
  - `OTEL_SPAN_EVENT_COUNT_LIMIT`
  - `OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT`
  - `OTEL_SPAN_LINK_COUNT_LIMIT`
  - `OTEL_LINK_ATTRIBUTE_COUNT_LIMIT`

  If the provided environment variables are invalid (negative), the default values will be used.
- Rename the `gc` runtime name to `go`. (#2560)
- Add resource container ID detection. (#2418)
- Add span attribute value length limit.
  The new `AttributeValueLengthLimit` field is added to the `"go.opentelemetry.io/otel/sdk/trace".SpanLimits` type to configure this limit for a `TracerProvider`.
  The default limit for this resource is "unlimited". (#2637)
- Add the `WithRawSpanLimits` option to `go.opentelemetry.io/otel/sdk/trace` (see the sketch below).
  This option replaces the `WithSpanLimits` option.
  Zero or negative values will not be changed to the default value like `WithSpanLimits` does.
  Setting a limit to zero will effectively disable the related resource it limits and setting to a negative value will mean that resource is unlimited.
  Consequently, limits should be constructed using `NewSpanLimits` and updated accordingly. (#2637)

### Changed

- Drop oldest tracestate `Member` when capacity is reached. (#2592)
- Add event and link drop counts to the exported data from the `otlptrace` exporter. (#2601)
- Unify path cleaning functionality in the `otlpmetric` and `otlptrace` configuration. (#2639)
- Change the debug message from the `sdk/trace.BatchSpanProcessor` to reflect the count is cumulative. (#2640)
- Introduce new internal `envconfig` package for OTLP exporters. (#2608)
- If `http.Request.Host` is empty, fall back to use `URL.Host` when populating `http.host` in the `semconv` packages. (#2661)

### Fixed

- Remove the OTLP trace exporter limit of SpanEvents when exporting. (#2616)
- Default to port `4318` instead of `4317` for the `otlpmetrichttp` and `otlptracehttp` client. (#2614, #2625)
- Unlimited span limits are now supported (negative values). (#2636, #2637)

### Deprecated

- Deprecated `"go.opentelemetry.io/otel/sdk/trace".WithSpanLimits`.
  Use `WithRawSpanLimits` instead.
  That option allows setting unlimited and zero limits, this option does not.
  This option will be kept until the next major version incremented release. (#2637)
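A minimal sketch of the raw span limits described above, assuming the current `go.opentelemetry.io/otel/sdk/trace` API; the numeric limits are arbitrary examples:

```go
package main

import (
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func newTracerProviderWithLimits() *sdktrace.TracerProvider {
	// WithRawSpanLimits uses the limits exactly as given: zero disables the
	// related resource and a negative value means unlimited.
	limits := sdktrace.SpanLimits{
		AttributeValueLengthLimit: 256, // truncate long attribute values
		AttributeCountLimit:       64,
		EventCountLimit:           32,
		LinkCountLimit:            32,
	}
	return sdktrace.NewTracerProvider(sdktrace.WithRawSpanLimits(limits))
}
```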
## [1.4.1] - 2022-02-16

### Fixed

- Fix race condition in reading the dropped spans number for the `BatchSpanProcessor`. (#2615)

## [1.4.0] - 2022-02-11

### Added

- Use `OTEL_EXPORTER_ZIPKIN_ENDPOINT` environment variable to specify the Zipkin collector endpoint. (#2490)
- Log the configuration of `TracerProvider`s and `Tracer`s for debugging.
  To enable, use a logger with verbosity (V level) `>=1`. (#2500)
- Added support to configure the batch span-processor with environment variables.
  The following environment variables are used. (#2515)
  - `OTEL_BSP_SCHEDULE_DELAY`
  - `OTEL_BSP_EXPORT_TIMEOUT`
  - `OTEL_BSP_MAX_QUEUE_SIZE`
  - `OTEL_BSP_MAX_EXPORT_BATCH_SIZE`

### Changed

- Zipkin exporter exports `Resource` attributes in the `Tags` field. (#2589)

### Deprecated

- Deprecate the `go.opentelemetry.io/otel/sdk/export/metric` module.
  Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2382)
- Deprecate `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets`. (#2445)

### Fixed

- Fixed the instrument kind for noop async instruments to correctly report an implementation. (#2461)
- Fix UDP packets overflowing with Jaeger payloads. (#2489, #2512)
- Change the `otlpmetric.Client` interface's `UploadMetrics` method to accept a single `ResourceMetrics` instead of a slice of them. (#2491)
- Specify explicit buckets in Prometheus example, fixing issue where example only has `+inf` bucket. (#2419, #2493)
- W3C baggage will now decode URL-escaped values. (#2529)
- Baggage members are now only validated once, when calling `NewMember` and not also when adding it to the baggage itself. (#2522)
- The order in which attributes are dropped from spans in the `go.opentelemetry.io/otel/sdk/trace` package when capacity is reached is fixed to be in compliance with the OpenTelemetry specification.
  Instead of dropping the least-recently-used attribute, the last added attribute is dropped.
  This drop order still only applies to attributes with unique keys not already contained in the span.
  If an attribute is added with a key already contained in the span, that attribute is updated to the new value being added. (#2576)

### Removed

- Updated `go.opentelemetry.io/proto/otlp` from `v0.11.0` to `v0.12.0`. This version removes a number of deprecated methods. (#2546)
  - [`Metric.GetIntGauge()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntGauge)
  - [`Metric.GetIntHistogram()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntHistogram)
  - [`Metric.GetIntSum()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntSum)

## [1.3.0] - 2021-12-10

### ⚠️ Notice ⚠️

We have updated the project minimum supported Go version to 1.16.

### Added

- Added an internal Logger.
  This can be used by the SDK and API to provide users with feedback of the internal state.
  To enable verbose logs, configure the logger to print V(1) logs. For debugging information, configure it to print V(5) logs. (#2343)
- Add the `WithRetry` `Option` and the `RetryConfig` type to the `go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp` package to specify retry behavior consistently. (#2425)
- Add `SpanStatusFromHTTPStatusCodeAndSpanKind` to all `semconv` packages to return a span status code similar to `SpanStatusFromHTTPStatusCode`, but exclude `4XX` HTTP errors as span errors if the span is of server kind. (#2296)

### Changed

- The `"go.opentelemetry.io/otel/exporter/otel/otlptrace/otlptracegrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handling errors on established connections by re-resolving the name and reconnecting. (#2329)
- The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handling errors on established connections by re-resolving the name and reconnecting. (#2425)
- The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".RetrySettings` type is renamed to `RetryConfig`. (#2425)
- The `go.opentelemetry.io/otel/exporter/otel/*` gRPC exporters now default to using the host's root CA set if none are provided by the user and `WithInsecure` is not specified. (#2432)
- Change `resource.Default` to be evaluated the first time it is called, rather than on import. This allows the caller the option to update `OTEL_RESOURCE_ATTRIBUTES` first, such as with `os.Setenv`. (#2371)

### Fixed

- The `go.opentelemetry.io/otel/exporter/otel/*` exporters are updated to handle per-signal and universal endpoints according to the OpenTelemetry specification.
  Any per-signal endpoint set via an `OTEL_EXPORTER_OTLP_<signal>_ENDPOINT` environment variable is now used without modification of the path.
  When `OTEL_EXPORTER_OTLP_ENDPOINT` is set, if it contains a path, that path is used as a base path which per-signal paths are appended to. (#2433)
- Basic metric controller updated to use sync.Map to avoid blocking calls. (#2381)
- The `go.opentelemetry.io/otel/exporter/jaeger` correctly sets the `otel.status_code` value to be a string of `ERROR` or `OK` instead of an integer code. (#2439, #2440)

### Deprecated

- Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithMaxAttempts` `Option`, use the new `WithRetry` `Option` instead. (#2425)
- Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithBackoff` `Option`, use the new `WithRetry` `Option` instead. (#2425)

### Removed

- Remove the metric Processor's ability to convert cumulative to delta aggregation temporality. (#2350)
- Remove the metric Bound Instruments interface and implementations. (#2399)
- Remove the metric MinMaxSumCount kind aggregation and the corresponding OTLP export path. (#2423)
- Metric SDK removes the "exact" aggregator for histogram instruments, as it performed a non-standard aggregation for OTLP export (creating repeated Gauge points) and worked its way into a number of confusing examples. (#2348)

## [1.2.0] - 2021-11-12

### Changed

- Metric SDK `export.ExportKind`, `export.ExportKindSelector` types have been renamed to `aggregation.Temporality` and `aggregation.TemporalitySelector` respectively to keep in line with current specification and protocol along with built-in selectors (e.g., `aggregation.CumulativeTemporalitySelector`, ...). (#2274)
- The Metric `Exporter` interface now requires a `TemporalitySelector` method instead of an `ExportKindSelector`. (#2274)
- Metrics API cleanup. The `metric/sdkapi` package has been created to relocate the API-to-SDK interface:
  - The following interface types simply moved from `metric` to `metric/sdkapi`: `Descriptor`, `MeterImpl`, `InstrumentImpl`, `SyncImpl`, `BoundSyncImpl`, `AsyncImpl`, `AsyncRunner`, `AsyncSingleRunner`, and `AsyncBatchRunner`
  - The following struct types moved and are replaced with type aliases, since they are exposed to the user: `Observation`, `Measurement`.
  - The No-op implementations of sync and async instruments are no longer exported, new functions `sdkapi.NewNoopAsyncInstrument()` and `sdkapi.NewNoopSyncInstrument()` are provided instead. (#2271)
- Update the SDK `BatchSpanProcessor` to export all queued spans when `ForceFlush` is called. (#2080, #2335)

### Added

- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002)
- Added a new `schema` module to help parse Schema Files in OTEP 0152 format. (#2267)
- Added a new `MapCarrier` to the `go.opentelemetry.io/otel/propagation` package to hold propagated cross-cutting concerns as a `map[string]string` held in memory (see the sketch below). (#2334)
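A minimal sketch of the `MapCarrier` usage described above, assuming the `go.opentelemetry.io/otel/propagation` package as it ships today:

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/propagation"
)

func main() {
	prop := propagation.TraceContext{}

	// MapCarrier stores propagated fields in a plain map[string]string,
	// which is convenient for tests or non-HTTP transports.
	carrier := propagation.MapCarrier{}

	// With an active span in the context this writes a traceparent entry.
	prop.Inject(context.Background(), carrier)

	// On the receiving side the same carrier restores the span context.
	ctx := prop.Extract(context.Background(), carrier)
	_ = ctx
	fmt.Println(carrier)
}
```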
## [1.1.0] - 2021-10-27

### Added

- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002)
- Add the `go.opentelemetry.io/otel/semconv/v1.7.0` package.
  The package contains semantic conventions from the `v1.7.0` version of the OpenTelemetry specification. (#2320)
- Add the `go.opentelemetry.io/otel/semconv/v1.6.1` package.
  The package contains semantic conventions from the `v1.6.1` version of the OpenTelemetry specification. (#2321)
- Add the `go.opentelemetry.io/otel/semconv/v1.5.0` package.
  The package contains semantic conventions from the `v1.5.0` version of the OpenTelemetry specification. (#2322)
  - When upgrading from the `semconv/v1.4.0` package note the following name changes:
    - `K8SReplicasetUIDKey` -> `K8SReplicaSetUIDKey`
    - `K8SReplicasetNameKey` -> `K8SReplicaSetNameKey`
    - `K8SStatefulsetUIDKey` -> `K8SStatefulSetUIDKey`
    - `k8SStatefulsetNameKey` -> `K8SStatefulSetNameKey`
    - `K8SDaemonsetUIDKey` -> `K8SDaemonSetUIDKey`
    - `K8SDaemonsetNameKey` -> `K8SDaemonSetNameKey`

### Changed

- Links added to a span will be dropped by the SDK if they contain an invalid span context. (#2275)

### Fixed

- The `"go.opentelemetry.io/otel/semconv/v1.4.0".HTTPServerAttributesFromHTTPRequest` now correctly only sets the HTTP client IP attribute even if the connection was routed with proxies and there are multiple addresses in the `X-Forwarded-For` header. (#2282, #2284)
- The `"go.opentelemetry.io/otel/semconv/v1.4.0".NetAttributesFromHTTPRequest` function correctly handles IPv6 addresses as IP addresses and sets the correct net peer IP instead of the net peer hostname attribute. (#2283, #2285)
- The simple span processor shutdown method deterministically returns the exporter error status if it simultaneously finishes when the deadline is reached. (#2290, #2289)

## [1.0.1] - 2021-10-01

### Fixed

- The JSON stdout exporter no longer crashes due to a concurrency bug. (#2265)

## [Metrics 0.24.0] - 2021-10-01

### Changed

- NoopMeterProvider is now private and NewNoopMeterProvider must be used to obtain a noopMeterProvider. (#2237)
- The Metric SDK `Export()` function takes a new two-level reader interface for iterating over results one instrumentation library at a time. (#2197)
  - The former `"go.opentelemetry.io/otel/sdk/export/metric".CheckpointSet` is renamed `Reader`.
  - The new interface is named `"go.opentelemetry.io/otel/sdk/export/metric".InstrumentationLibraryReader`.

## [1.0.0] - 2021-09-20

This is the first stable release for the project.
This release includes an API and SDK for the tracing signal that will comply with the stability guarantees defined by the project's [versioning policy](./VERSIONING.md).

### Added

- OTLP trace exporter now sets the `SchemaURL` field in the exported telemetry if the Tracer is configured with the `WithSchemaURL` option. (#2242)

### Fixed

- Slice-valued attributes can correctly be used as map keys. (#2223)

### Removed

- Removed the `"go.opentelemetry.io/otel/exporters/zipkin".WithSDKOptions` function. (#2248)
- Removed the deprecated package `go.opentelemetry.io/otel/oteltest`. (#2234)
- Removed the deprecated package `go.opentelemetry.io/otel/bridge/opencensus/utils`. (#2233)
- Removed deprecated functions, types, and methods from `go.opentelemetry.io/otel/attribute` package.
  Use the typed functions and methods added to the package instead. (#2235)
  - The `Key.Array` method is removed.
  - The `Array` function is removed.
  - The `Any` function is removed.
  - The `ArrayValue` function is removed.
  - The `AsArray` function is removed.

## [1.0.0-RC3] - 2021-09-02

### Added

- Added `ErrorHandlerFunc` to use a function as an `"go.opentelemetry.io/otel".ErrorHandler`. (#2149)
- Added `"go.opentelemetry.io/otel/trace".WithStackTrace` option to add a stack trace when using `span.RecordError` or when panic is handled in `span.End`. (#2163)
- Added typed slice attribute types and functionality to the `go.opentelemetry.io/otel/attribute` package to replace the existing array type and functions (see the sketch below). (#2162)
  - `BoolSlice`, `IntSlice`, `Int64Slice`, `Float64Slice`, and `StringSlice` replace the use of the `Array` function in the package.
- Added the `go.opentelemetry.io/otel/example/fib` example package.
  Included is an example application that computes Fibonacci numbers. (#2203)

### Changed

- Metric instruments have been renamed to match the (feature-frozen) metric API specification:
  - ValueRecorder becomes Histogram
  - ValueObserver becomes Gauge
  - SumObserver becomes CounterObserver
  - UpDownSumObserver becomes UpDownCounterObserver

  The API exported from this project is still considered experimental. (#2202)
- Metric SDK/API implementation type `InstrumentKind` moves into `sdkapi` sub-package. (#2091)
- The Metrics SDK export record no longer contains a Resource pointer, the SDK `"go.opentelemetry.io/otel/sdk/export/metric".Exporter.Export()` function for push-based exporters now takes a single Resource argument, pull-based exporters use `"go.opentelemetry.io/otel/sdk/metric/controller/basic".Controller.Resource()`. (#2120)
- The JSON output of the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` is harmonized now such that the output is "plain" JSON objects after each other of the form `{ ... } { ... } { ... }`. Earlier the JSON objects describing a span were wrapped in a slice for each `Exporter.ExportSpans` call, like `[ { ... } ][ { ... } { ... } ]`. Outputting JSON objects directly after each other is consistent with JSON loggers, and a bit easier to parse and read. (#2196)
- Update the `NewTracerConfig`, `NewSpanStartConfig`, `NewSpanEndConfig`, and `NewEventConfig` functions in the `go.opentelemetry.io/otel/trace` package to return their respective configurations as structs instead of pointers to the struct. (#2212)

### Deprecated

- The `go.opentelemetry.io/otel/bridge/opencensus/utils` package is deprecated.
  All functionality from this package now exists in the `go.opentelemetry.io/otel/bridge/opencensus` package.
  The functions from that package should be used instead. (#2166)
- The `"go.opentelemetry.io/otel/attribute".Array` function and the related `ARRAY` value type are deprecated.
  Use the typed `*Slice` functions and types added to the package instead. (#2162)
- The `"go.opentelemetry.io/otel/attribute".Any` function is deprecated.
  Use the typed functions instead. (#2181)
- The `go.opentelemetry.io/otel/oteltest` package is deprecated.
  The `"go.opentelemetry.io/otel/sdk/trace/tracetest".SpanRecorder` can be registered with the default SDK (`go.opentelemetry.io/otel/sdk/trace`) as a `SpanProcessor` and used as a replacement for this deprecated package. (#2188)

### Removed

- Removed metrics test package `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#2105)
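A minimal sketch of the typed slice attribute constructors described above, assuming the current `go.opentelemetry.io/otel/attribute` package; the attribute keys are placeholders:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	// Typed slice constructors replace the deprecated Array function.
	attrs := []attribute.KeyValue{
		attribute.StringSlice("rpc.args", []string{"a", "b"}),
		attribute.Int64Slice("retry.delays_ms", []int64{100, 200}),
		attribute.BoolSlice("feature.flags", []bool{true, false}),
	}
	fmt.Println(attrs)
}
```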
### Fixed

- The `fromEnv` detector no longer throws an error when `OTEL_RESOURCE_ATTRIBUTES` environment variable is not set or empty. (#2138)
- Setting the global `ErrorHandler` with `"go.opentelemetry.io/otel".SetErrorHandler` multiple times is now supported. (#2160, #2140)
- The `"go.opentelemetry.io/otel/attribute".Any` function now supports `int32` values. (#2169)
- Multiple calls to `"go.opentelemetry.io/otel/sdk/metric/controller/basic".WithResource()` are handled correctly, and when no resources are provided `"go.opentelemetry.io/otel/sdk/resource".Default()` is used. (#2120)
- The `WithoutTimestamps` option for the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter causes the exporter to correctly omit timestamps. (#2195)
- Fixed typos in resources.go. (#2201)

## [1.0.0-RC2] - 2021-07-26

### Added

- Added `WithOSDescription` resource configuration option to set OS (Operating System) description resource attribute (`os.description`). (#1840)
- Added `WithOS` resource configuration option to set all OS (Operating System) resource attributes at once. (#1840)
- Added the `WithRetry` option to the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package.
  This option is a replacement for the removed `WithMaxAttempts` and `WithBackoff` options. (#2095)
- Added API `LinkFromContext` to return Link which encapsulates SpanContext from provided context and also encapsulates attributes. (#2115)
- Added a new `Link` type under the SDK `otel/sdk/trace` package that counts the number of attributes that were dropped for surpassing the `AttributePerLinkCountLimit` configured in the Span's `SpanLimits`.
  This new type replaces the equal-named API `Link` type found in the `otel/trace` package for most usages within the SDK.
  For example, instances of this type are now returned by the `Links()` function of `ReadOnlySpan`s provided in places like the `OnEnd` function of `SpanProcessor` implementations. (#2118)
- Added the `SpanRecorder` type to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package.
  This type can be used with the default SDK as a `SpanProcessor` during testing. (#2132)

### Changed

- The `SpanModels` function is now exported from the `go.opentelemetry.io/otel/exporters/zipkin` package to convert OpenTelemetry spans into Zipkin model spans. (#2027)
- Rename the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".RetrySettings` to `RetryConfig`. (#2095)

### Deprecated

- The `TextMapCarrier` and `TextMapPropagator` from the `go.opentelemetry.io/otel/oteltest` package and their associated creation functions (`TextMapCarrier`, `NewTextMapPropagator`) are deprecated. (#2114)
- The `Harness` type from the `go.opentelemetry.io/otel/oteltest` package and its associated creation function, `NewHarness`, are deprecated and will be removed in the next release. (#2123)
- The `TraceStateFromKeyValues` function from the `go.opentelemetry.io/otel/oteltest` package is deprecated.
  Use the `trace.ParseTraceState` function instead. (#2122)

### Removed

- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/jaeger`. (#2020)
- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/zipkin`. (#2020)
- Removed the `"go.opentelemetry.io/otel/sdk/resource".WithBuiltinDetectors` function.
  The explicit `With*` options for every built-in detector should be used instead. (#2026, #2097)
- Removed the `WithMaxAttempts` and `WithBackoff` options from the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package.
  The retry logic of the package has been updated to match the `otlptracegrpc` package and accordingly a `WithRetry` option is added that should be used instead. (#2095)
- Removed `DroppedAttributeCount` field from `otel/trace.Link` struct. (#2118)

### Fixed

- When using `WithNewRoot`, don't use the parent context for making sampling decisions. (#2032)
- `oteltest.Tracer` now creates a valid `SpanContext` when using `WithNewRoot`. (#2073)
- OS type detector now sets the correct `dragonflybsd` value for DragonFly BSD. (#2092)
- The OTel span status is correctly transformed into the OTLP status in the `go.opentelemetry.io/otel/exporters/otlp/otlptrace` package.
  This fix will by default set the status to `Unset` if it is not explicitly set to `Ok` or `Error`. (#2099, #2102)
- The `Inject` method for the `"go.opentelemetry.io/otel/propagation".TraceContext` type no longer injects empty `tracestate` values. (#2108)
- Use `6831` as default Jaeger agent port instead of `6832`. (#2131)

## [Experimental Metrics v0.22.0] - 2021-07-19

### Added

- Adds HTTP support for OTLP metrics exporter. (#2022)

### Removed

- Removed the deprecated package `go.opentelemetry.io/otel/exporters/metric/prometheus`. (#2020)

## [1.0.0-RC1] / 0.21.0 - 2021-06-18

With this release we are introducing a split in module versions. The tracing API and SDK are entering the `v1.0.0` Release Candidate phase with `v1.0.0-RC1`
while the experimental metrics API and SDK continue with `v0.x` releases at `v0.21.0`. Modules at major version 1 or greater will not depend on modules
with major version 0.

### Added

- Adds `otlpgrpc.WithRetry` option for configuring the retry policy for transient errors on the otlp/gRPC exporter. (#1832)
  - The following status codes are defined as transient errors:

    | gRPC Status Code | Description |
    | ---------------- | ----------- |
    | 1 | Cancelled |
    | 4 | Deadline Exceeded |
    | 8 | Resource Exhausted |
    | 10 | Aborted |
    | 11 | Out of Range |
    | 14 | Unavailable |
    | 15 | Data Loss |
- Added `Status` type to the `go.opentelemetry.io/otel/sdk/trace` package to represent the status of a span. (#1874)
- Added `SpanStub` type and its associated functions to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package.
  This type can be used as a testing replacement for the `SpanSnapshot` that was removed from the `go.opentelemetry.io/otel/sdk/trace` package. (#1873)
- Adds support for scheme in `OTEL_EXPORTER_OTLP_ENDPOINT` according to the spec. (#1886)
- Adds `trace.WithSchemaURL` option for configuring the tracer with a Schema URL. (#1889)
- Added an example of using OpenTelemetry Go as a trace context forwarder. (#1912)
- `ParseTraceState` is added to the `go.opentelemetry.io/otel/trace` package.
  It can be used to decode a `TraceState` from a `tracestate` header string value. (#1937)
- Added `Len` method to the `TraceState` type in the `go.opentelemetry.io/otel/trace` package.
  This method returns the number of list-members the `TraceState` holds. (#1937)
- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace` that defines a trace exporter that uses a `otlptrace.Client` to send data.
  Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` implementing a gRPC `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to set up and install an `otlptrace.Exporter` in tracing. (#1922)
- Added `Baggage`, `Member`, and `Property` types to the `go.opentelemetry.io/otel/baggage` package along with their related functions. (#1967)
- Added `ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext` functions to the `go.opentelemetry.io/otel/baggage` package.
  These functions replace the `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions from that package and directly work with the new `Baggage` type (see the sketch below). (#1967)
- The `OTEL_SERVICE_NAME` environment variable is the preferred source for `service.name`, used by the environment resource detector if a service name is present both there and in `OTEL_RESOURCE_ATTRIBUTES`. (#1969)
- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` implementing an HTTP `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to set up and install an `otlptrace.Exporter` in tracing. (#1963)
- Changes `go.opentelemetry.io/otel/sdk/resource.NewWithAttributes` to require a schema URL. The old function is still available as `resource.NewSchemaless`. This is a breaking change. (#1938)
- Several builtin resource detectors now correctly populate the schema URL. (#1938)
- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` that defines a metrics exporter that uses a `otlpmetric.Client` to send data.
- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` implementing a gRPC `otlpmetric.Client` and offers convenience functions, `New` and `NewUnstarted`, to create an `otlpmetric.Exporter`. (#1991)
- Added `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter. (#2005)
- Added `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` exporter. (#2005)
- Added a `TracerProvider()` method to the `"go.opentelemetry.io/otel/trace".Span` interface. This can be used to obtain a `TracerProvider` from a given span that utilizes the same trace processing pipeline. (#2009)
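A minimal sketch of the `baggage` API described above, assuming the package as it ships today; the member key and value are placeholders and error handling is elided:

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/baggage"
)

func main() {
	// Build baggage from members instead of the removed Set/Value helpers.
	member, _ := baggage.NewMember("user.id", "42")
	bag, _ := baggage.New(member)

	// Attach it to a context and read it back further down the call chain.
	ctx := baggage.ContextWithBaggage(context.Background(), bag)
	fmt.Println(baggage.FromContext(ctx).Member("user.id").Value())
}
```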
### Changed

- Make `NewSplitDriver` from `go.opentelemetry.io/otel/exporters/otlp` take variadic arguments instead of a `SplitConfig` item.
  `NewSplitDriver` now automatically implements an internal `noopDriver` for `SplitConfig` fields that are not initialized. (#1798)
- `resource.New()` now creates a Resource without builtin detectors. Previous behavior is now achieved by using `WithBuiltinDetectors` Option. (#1810)
- Move the `Event` type from the `go.opentelemetry.io/otel` package to the `go.opentelemetry.io/otel/sdk/trace` package. (#1846)
- CI builds validate against last two versions of Go, dropping 1.14 and adding 1.16. (#1865)
- BatchSpanProcessor now reports export failures when calling `ForceFlush()` method. (#1860)
- `Set.Encoded(Encoder)` no longer caches the result of an encoding. (#1855)
- Renamed `CloudZoneKey` to `CloudAvailabilityZoneKey` in Resource semantic conventions according to spec. (#1871)
- The `StatusCode` and `StatusMessage` methods of the `ReadOnlySpan` interface and the `Span` produced by the `go.opentelemetry.io/otel/sdk/trace` package have been replaced with a single `Status` method.
  This method returns the status of a span using the new `Status` type. (#1874)
- Updated `ExportSpans` method of the `SpanExporter` interface type to accept `ReadOnlySpan`s instead of the removed `SpanSnapshot`.
  This brings the export interface into compliance with the specification in that it now accepts an explicitly immutable type instead of just an implied one. (#1873)
- Unembed `SpanContext` in `Link`. (#1877)
- Generate semantic conventions from the specification YAML. (#1891)
- Spans created by the global `Tracer` obtained from `go.opentelemetry.io/otel`, prior to a functioning `TracerProvider` being set, now propagate the span context from their parent if one exists. (#1901)
- The `"go.opentelemetry.io/otel".Tracer` function now accepts tracer options. (#1902)
- Move the `go.opentelemetry.io/otel/unit` package to `go.opentelemetry.io/otel/metric/unit`. (#1903)
- Changed `go.opentelemetry.io/otel/trace.TracerConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
- Changed `go.opentelemetry.io/otel/trace.SpanConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
- Changed `span.End()` to only accept Options that are allowed at `End()`. (#1921)
- Changed `go.opentelemetry.io/otel/metric.InstrumentConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
- Changed `go.opentelemetry.io/otel/metric.MeterConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
- Refactored option types according to the contribution style guide. (#1882)
- Move the `go.opentelemetry.io/otel/trace.TraceStateFromKeyValues` function to the `go.opentelemetry.io/otel/oteltest` package.
  This function is preserved for testing purposes where it may be useful to create a `TraceState` from `attribute.KeyValue`s, but it is not intended for production use.
  The new `ParseTraceState` function should be used to create a `TraceState`. (#1931)
- Updated `MarshalJSON` method of the `go.opentelemetry.io/otel/trace.TraceState` type to marshal the type into the string representation of the `TraceState`. (#1931)
- The `TraceState.Delete` method from the `go.opentelemetry.io/otel/trace` package no longer returns an error in addition to a `TraceState`. (#1931)
- Updated `Get` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931)
- Updated `Insert` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a pair of `string`s instead of an `attribute.KeyValue` type. (#1931)
- Updated `Delete` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931)
- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/stdout` package. (#1985)
- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/metric/prometheus` package. (#1985)
- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1985)
- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1985)
- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985)
- Renamed `NewUnstartedExporter` to `NewUnstarted` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985)
- The `go.opentelemetry.io/otel/semconv` package has been moved to `go.opentelemetry.io/otel/semconv/v1.4.0` to allow for multiple [telemetry schema](https://github.com/open-telemetry/oteps/blob/main/text/0152-telemetry-schemas.md) versions to be used concurrently. (#1987)
- Metrics test helpers in `go.opentelemetry.io/otel/oteltest` have been moved to `go.opentelemetry.io/otel/metric/metrictest`. (#1988)

### Deprecated

- The `go.opentelemetry.io/otel/exporters/metric/prometheus` is deprecated, use `go.opentelemetry.io/otel/exporters/prometheus` instead. (#1993)
- The `go.opentelemetry.io/otel/exporters/trace/jaeger` is deprecated, use `go.opentelemetry.io/otel/exporters/jaeger` instead. (#1993)
- The `go.opentelemetry.io/otel/exporters/trace/zipkin` is deprecated, use `go.opentelemetry.io/otel/exporters/zipkin` instead. (#1993)
### Removed

- Removed `resource.WithoutBuiltin()`. Use `resource.New()`. (#1810)
- Unexported types `resource.FromEnv`, `resource.Host`, and `resource.TelemetrySDK`. Use the corresponding `With*()` options to enable them individually. (#1810)
- Removed the `Tracer` and `IsRecording` methods from the `ReadOnlySpan` in the `go.opentelemetry.io/otel/sdk/trace` package.
  The `Tracer` method is not required to be included in this interface and given the mutable nature of the tracer that is associated with a span, this method is not appropriate.
  The `IsRecording` method returns if the span is recording or not.
  A read-only span value does not need to know if updates to it will be recorded or not.
  By definition, it cannot be updated so there is no point in communicating if an update is recorded. (#1873)
- Removed the `SpanSnapshot` type from the `go.opentelemetry.io/otel/sdk/trace` package.
  The use of this type has been replaced with the use of the explicitly immutable `ReadOnlySpan` type.
  When a concrete representation of a read-only span is needed for testing, the newly added `SpanStub` in the `go.opentelemetry.io/otel/sdk/trace/tracetest` package should be used. (#1873)
- Removed the `Tracer` method from the `Span` interface in the `go.opentelemetry.io/otel/trace` package.
  Using the same tracer that created a span introduces the error where an instrumentation library's `Tracer` is used by other code instead of their own.
  The `"go.opentelemetry.io/otel".Tracer` function or a `TracerProvider` should be used to acquire a library-specific `Tracer` instead. (#1900)
  - The `TracerProvider()` method on the `Span` interface may also be used to obtain a `TracerProvider` using the same trace processing pipeline. (#2009)
- The `http.url` attribute generated by `HTTPClientAttributesFromHTTPRequest` will no longer include username or password information. (#1919)
- Removed `IsEmpty` method of the `TraceState` type in the `go.opentelemetry.io/otel/trace` package in favor of using the added `TraceState.Len` method. (#1931)
- Removed `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions in the `go.opentelemetry.io/otel/baggage` package.
  Handling of baggage is now done using the added `Baggage` type and related context functions (`ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext`) in that package. (#1967)
- The `InstallNewPipeline` and `NewExportPipeline` creation functions in all the exporters (prometheus, otlp, stdout, jaeger, and zipkin) have been removed.
  These functions were deemed premature attempts to provide convenience that did not achieve this aim. (#1985)
- The `go.opentelemetry.io/otel/exporters/otlp` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace` instead. (#1990)
- The `go.opentelemetry.io/otel/exporters/stdout` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` or `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` instead. (#2005)

### Fixed

- Only report errors from the `"go.opentelemetry.io/otel/sdk/resource".Environment` function when they are not `nil`. (#1850, #1851)
- The `Shutdown` method of the simple `SpanProcessor` in the `go.opentelemetry.io/otel/sdk/trace` package now honors the context deadline or cancellation. (#1616, #1856)
- BatchSpanProcessor now drops span batches that failed to be exported. (#1860)
- Use `http://localhost:14268/api/traces` as default Jaeger collector endpoint instead of `http://localhost:14250`. (#1898)
(#1898) -- Allow trailing and leading whitespace in the parsing of a `tracestate` header. (#1931) -- Add logic to determine if the channel is closed to fix Jaeger exporter test panic with close closed channel. (#1870, #1973) -- Avoid transport security when OTLP endpoint is a Unix socket. (#2001) - -### Security - -## [0.20.0] - 2021-04-23 - -### Added - -- The OTLP exporter now has two new convenience functions, `NewExportPipeline` and `InstallNewPipeline`, setup and install the exporter in tracing and metrics pipelines. (#1373) -- Adds semantic conventions for exceptions. (#1492) -- Added Jaeger Environment variables: `OTEL_EXPORTER_JAEGER_AGENT_HOST`, `OTEL_EXPORTER_JAEGER_AGENT_PORT` - These environment variables can be used to override Jaeger agent hostname and port (#1752) -- Option `ExportTimeout` was added to batch span processor. (#1755) -- `trace.TraceFlags` is now a defined type over `byte` and `WithSampled(bool) TraceFlags` and `IsSampled() bool` methods have been added to it. (#1770) -- The `Event` and `Link` struct types from the `go.opentelemetry.io/otel` package now include a `DroppedAttributeCount` field to record the number of attributes that were not recorded due to configured limits being reached. (#1771) -- The Jaeger exporter now reports dropped attributes for a Span event in the exported log. (#1771) -- Adds test to check BatchSpanProcessor ignores `OnEnd` and `ForceFlush` post `Shutdown`. (#1772) -- Extract resource attributes from the `OTEL_RESOURCE_ATTRIBUTES` environment variable and merge them with the `resource.Default` resource as well as resources provided to the `TracerProvider` and metric `Controller`. (#1785) -- Added `WithOSType` resource configuration option to set OS (Operating System) type resource attribute (`os.type`). (#1788) -- Added `WithProcess*` resource configuration options to set Process resource attributes. (#1788) - - `process.pid` - - `process.executable.name` - - `process.executable.path` - - `process.command_args` - - `process.owner` - - `process.runtime.name` - - `process.runtime.version` - - `process.runtime.description` -- Adds `k8s.node.name` and `k8s.node.uid` attribute keys to the `semconv` package. (#1789) -- Added support for configuring OTLP/HTTP and OTLP/gRPC Endpoints, TLS Certificates, Headers, Compression and Timeout via Environment Variables. (#1758, #1769 and #1811) - - `OTEL_EXPORTER_OTLP_ENDPOINT` - - `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT` - - `OTEL_EXPORTER_OTLP_METRICS_ENDPOINT` - - `OTEL_EXPORTER_OTLP_HEADERS` - - `OTEL_EXPORTER_OTLP_TRACES_HEADERS` - - `OTEL_EXPORTER_OTLP_METRICS_HEADERS` - - `OTEL_EXPORTER_OTLP_COMPRESSION` - - `OTEL_EXPORTER_OTLP_TRACES_COMPRESSION` - - `OTEL_EXPORTER_OTLP_METRICS_COMPRESSION` - - `OTEL_EXPORTER_OTLP_TIMEOUT` - - `OTEL_EXPORTER_OTLP_TRACES_TIMEOUT` - - `OTEL_EXPORTER_OTLP_METRICS_TIMEOUT` - - `OTEL_EXPORTER_OTLP_CERTIFICATE` - - `OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE` - - `OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE` -- Adds `otlpgrpc.WithTimeout` option for configuring timeout to the otlp/gRPC exporter. (#1821) -- Adds `jaeger.WithMaxPacketSize` option for configuring maximum UDP packet size used when connecting to the Jaeger agent. (#1853) - -### Fixed - -- The `Span.IsRecording` implementation from `go.opentelemetry.io/otel/sdk/trace` always returns false when not being sampled. (#1750) -- The Jaeger exporter now correctly sets tags for the Span status code and message. 
- This means it uses the correct tag keys (`"otel.status_code"`, `"otel.status_description"`) and does not set the status message as a tag unless it is set on the span. (#1761) -- The Jaeger exporter now correctly records Span event's names using the `"event"` key for a tag. - Additionally, this tag is overridden, as specified in the OTel specification, if the event contains an attribute with that key. (#1768) -- Zipkin Exporter: Ensure mapping between OTel and Zipkin span data complies with the specification. (#1688) -- Fixed typo for default service name in Jaeger Exporter. (#1797) -- Fix flaky OTLP for the reconnnection of the client connection. (#1527, #1814) -- Fix Jaeger exporter dropping of span batches that exceed the UDP packet size limit. - Instead, the exporter now splits the batch into smaller sendable batches. (#1828) - -### Changed - -- Span `RecordError` now records an `exception` event to comply with the semantic convention specification. (#1492) -- Jaeger exporter was updated to use thrift v0.14.1. (#1712) -- Migrate from using internally built and maintained version of the OTLP to the one hosted at `go.opentelemetry.io/proto/otlp`. (#1713) -- Migrate from using `github.com/gogo/protobuf` to `google.golang.org/protobuf` to match `go.opentelemetry.io/proto/otlp`. (#1713) -- The storage of a local or remote Span in a `context.Context` using its SpanContext is unified to store just the current Span. - The Span's SpanContext can now self-identify as being remote or not. - This means that `"go.opentelemetry.io/otel/trace".ContextWithRemoteSpanContext` will now overwrite any existing current Span, not just existing remote Spans, and make it the current Span in a `context.Context`. (#1731) -- Improve OTLP/gRPC exporter connection errors. (#1737) -- Information about a parent span context in a `"go.opentelemetry.io/otel/export/trace".SpanSnapshot` is unified in a new `Parent` field. - The existing `ParentSpanID` and `HasRemoteParent` fields are removed in favor of this. (#1748) -- The `ParentContext` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is updated to hold a `context.Context` containing the parent span. - This changes it to make `SamplingParameters` conform with the OpenTelemetry specification. (#1749) -- Updated Jaeger Environment Variables: `JAEGER_ENDPOINT`, `JAEGER_USER`, `JAEGER_PASSWORD` - to `OTEL_EXPORTER_JAEGER_ENDPOINT`, `OTEL_EXPORTER_JAEGER_USER`, `OTEL_EXPORTER_JAEGER_PASSWORD` in compliance with OTel specification. (#1752) -- Modify `BatchSpanProcessor.ForceFlush` to abort after timeout/cancellation. (#1757) -- The `DroppedAttributeCount` field of the `Span` in the `go.opentelemetry.io/otel` package now only represents the number of attributes dropped for the span itself. - It no longer is a conglomerate of itself, events, and link attributes that have been dropped. (#1771) -- Make `ExportSpans` in Jaeger Exporter honor context deadline. (#1773) -- Modify Zipkin Exporter default service name, use default resource's serviceName instead of empty. (#1777) -- The `go.opentelemetry.io/otel/sdk/export/trace` package is merged into the `go.opentelemetry.io/otel/sdk/trace` package. (#1778) -- The prometheus.InstallNewPipeline example is moved from comment to example test (#1796) -- The convenience functions for the stdout exporter have been updated to return the `TracerProvider` implementation and enable the shutdown of the exporter. 
(#1800) -- Replace the flush function returned from the Jaeger exporter's convenience creation functions (`InstallNewPipeline` and `NewExportPipeline`) with the `TracerProvider` implementation they create. - This enables the caller to shutdown and flush using the related `TracerProvider` methods. (#1822) -- Updated the Jaeger exporter to have a default endpoint, `http://localhost:14250`, for the collector. (#1824) -- Changed the function `WithCollectorEndpoint` in the Jaeger exporter to no longer accept an endpoint as an argument. - The endpoint can be passed with the `CollectorEndpointOption` using the `WithEndpoint` function or by setting the `OTEL_EXPORTER_JAEGER_ENDPOINT` environment variable value appropriately. (#1824) -- The Jaeger exporter no longer batches exported spans itself, instead it relies on the SDK's `BatchSpanProcessor` for this functionality. (#1830) -- The Jaeger exporter creation functions (`NewRawExporter`, `NewExportPipeline`, and `InstallNewPipeline`) no longer accept the removed `Option` type as a variadic argument. (#1830) - -### Removed - -- Removed Jaeger Environment variables: `JAEGER_SERVICE_NAME`, `JAEGER_DISABLED`, `JAEGER_TAGS` - These environment variables will no longer be used to override values of the Jaeger exporter (#1752) -- No longer set the links for a `Span` in `go.opentelemetry.io/otel/sdk/trace` that is configured to be a new root. - This is unspecified behavior that the OpenTelemetry community plans to standardize in the future. - To prevent backwards incompatible changes when it is specified, these links are removed. (#1726) -- Setting error status while recording error with Span from oteltest package. (#1729) -- The concept of a remote and local Span stored in a context is unified to just the current Span. - Because of this `"go.opentelemetry.io/otel/trace".RemoteSpanContextFromContext` is removed as it is no longer needed. - Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContex` can be used to return the current Span. - If needed, that Span's `SpanContext.IsRemote()` can then be used to determine if it is remote or not. (#1731) -- The `HasRemoteParent` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is removed. - This field is redundant to the information returned from the `Remote` method of the `SpanContext` held in the `ParentContext` field. (#1749) -- The `trace.FlagsDebug` and `trace.FlagsDeferred` constants have been removed and will be localized to the B3 propagator. (#1770) -- Remove `Process` configuration, `WithProcessFromEnv` and `ProcessFromEnv`, and type from the Jaeger exporter package. - The information that could be configured in the `Process` struct should be configured in a `Resource` instead. (#1776, #1804) -- Remove the `WithDisabled` option from the Jaeger exporter. - To disable the exporter unregister it from the `TracerProvider` or use a no-operation `TracerProvider`. (#1806) -- Removed the functions `CollectorEndpointFromEnv` and `WithCollectorEndpointOptionFromEnv` from the Jaeger exporter. - These functions for retrieving specific environment variable values are redundant of other internal functions and - are not intended for end user use. (#1824) -- Removed the Jaeger exporter `WithSDKOptions` `Option`. - This option was used to set SDK options for the exporter creation convenience functions. - These functions are provided as a way to easily setup or install the exporter with what are deemed reasonable SDK settings for common use cases. 
- If the SDK needs to be configured differently, the `NewRawExporter` function and direct setup of the SDK with the desired settings should be used. (#1825) -- The `WithBufferMaxCount` and `WithBatchMaxCount` `Option`s from the Jaeger exporter are removed. - The exporter no longer batches exports, instead relying on the SDK's `BatchSpanProcessor` for this functionality. (#1830) -- The Jaeger exporter `Option` type is removed. - The type is no longer used by the exporter to configure anything. - All the previous configurations these options provided were duplicates of SDK configuration. - They have been removed in favor of using the SDK configuration and focuses the exporter configuration to be only about the endpoints it will send telemetry to. (#1830) - -## [0.19.0] - 2021-03-18 - -### Added - -- Added `Marshaler` config option to `otlphttp` to enable otlp over json or protobufs. (#1586) -- A `ForceFlush` method to the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` to flush all registered `SpanProcessor`s. (#1608) -- Added `WithSampler` and `WithSpanLimits` to tracer provider. (#1633, #1702) -- `"go.opentelemetry.io/otel/trace".SpanContext` now has a `remote` property, and `IsRemote()` predicate, that is true when the `SpanContext` has been extracted from remote context data. (#1701) -- A `Valid` method to the `"go.opentelemetry.io/otel/attribute".KeyValue` type. (#1703) - -### Changed - -- `trace.SpanContext` is now immutable and has no exported fields. (#1573) - - `trace.NewSpanContext()` can be used in conjunction with the `trace.SpanContextConfig` struct to initialize a new `SpanContext` where all values are known. -- Update the `ForceFlush` method signature to the `"go.opentelemetry.io/otel/sdk/trace".SpanProcessor` to accept a `context.Context` and return an error. (#1608) -- Update the `Shutdown` method to the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` return an error on shutdown failure. (#1608) -- The SimpleSpanProcessor will now shut down the enclosed `SpanExporter` and gracefully ignore subsequent calls to `OnEnd` after `Shutdown` is called. (#1612) -- `"go.opentelemetry.io/sdk/metric/controller.basic".WithPusher` is replaced with `WithExporter` to provide consistent naming across project. (#1656) -- Added non-empty string check for trace `Attribute` keys. (#1659) -- Add `description` to SpanStatus only when `StatusCode` is set to error. (#1662) -- Jaeger exporter falls back to `resource.Default`'s `service.name` if the exported Span does not have one. (#1673) -- Jaeger exporter populates Jaeger's Span Process from Resource. (#1673) -- Renamed the `LabelSet` method of `"go.opentelemetry.io/otel/sdk/resource".Resource` to `Set`. (#1692) -- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1693) -- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1693) - -### Removed - -- Removed `serviceName` parameter from Zipkin exporter and uses resource instead. (#1549) -- Removed `WithConfig` from tracer provider to avoid overriding configuration. (#1633) -- Removed the exported `SimpleSpanProcessor` and `BatchSpanProcessor` structs. - These are now returned as a SpanProcessor interface from their respective constructors. (#1638) -- Removed `WithRecord()` from `trace.SpanOption` when creating a span. 
(#1660) -- Removed setting status to `Error` while recording an error as a span event in `RecordError`. (#1663) -- Removed `jaeger.WithProcess` configuration option. (#1673) -- Removed `ApplyConfig` method from `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` and the now unneeded `Config` struct. (#1693) - -### Fixed - -- Jaeger Exporter: Ensure mapping between OTEL and Jaeger span data complies with the specification. (#1626) -- `SamplingResult.TraceState` is correctly propagated to a newly created span's `SpanContext`. (#1655) -- The `otel-collector` example now correctly flushes metric events prior to shutting down the exporter. (#1678) -- Do not set span status message in `SpanStatusFromHTTPStatusCode` if it can be inferred from `http.status_code`. (#1681) -- Synchronization issues in global trace delegate implementation. (#1686) -- Reduced excess memory usage by global `TracerProvider`. (#1687) - -## [0.18.0] - 2021-03-03 - -### Added - -- Added `resource.Default()` for use with meter and tracer providers. (#1507) -- `AttributePerEventCountLimit` and `AttributePerLinkCountLimit` for `SpanLimits`. (#1535) -- Added `Keys()` method to `propagation.TextMapCarrier` and `propagation.HeaderCarrier` to adapt `http.Header` to this interface. (#1544) -- Added `code` attributes to `go.opentelemetry.io/otel/semconv` package. (#1558) -- Compatibility testing suite in the CI system for the following systems. (#1567) - | OS | Go Version | Architecture | - | ------- | ---------- | ------------ | - | Ubuntu | 1.15 | amd64 | - | Ubuntu | 1.14 | amd64 | - | Ubuntu | 1.15 | 386 | - | Ubuntu | 1.14 | 386 | - | MacOS | 1.15 | amd64 | - | MacOS | 1.14 | amd64 | - | Windows | 1.15 | amd64 | - | Windows | 1.14 | amd64 | - | Windows | 1.15 | 386 | - | Windows | 1.14 | 386 | - -### Changed - -- Replaced interface `oteltest.SpanRecorder` with its existing implementation - `StandardSpanRecorder`. (#1542) -- Default span limit values to 128. (#1535) -- Rename `MaxEventsPerSpan`, `MaxAttributesPerSpan` and `MaxLinksPerSpan` to `EventCountLimit`, `AttributeCountLimit` and `LinkCountLimit`, and move these fields into `SpanLimits`. (#1535) -- Renamed the `otel/label` package to `otel/attribute`. (#1541) -- Vendor the Jaeger exporter's dependency on Apache Thrift. (#1551) -- Parallelize the CI linting and testing. (#1567) -- Stagger timestamps in exact aggregator tests. (#1569) -- Changed all examples to use `WithBatchTimeout(5 * time.Second)` rather than `WithBatchTimeout(5)`. (#1621) -- Prevent end-users from implementing some interfaces (#1575) - - ``` - "otel/exporters/otlp/otlphttp".Option - "otel/exporters/stdout".Option - "otel/oteltest".Option - "otel/trace".TracerOption - "otel/trace".SpanOption - "otel/trace".EventOption - "otel/trace".LifeCycleOption - "otel/trace".InstrumentationOption - "otel/sdk/resource".Option - "otel/sdk/trace".ParentBasedSamplerOption - "otel/sdk/trace".ReadOnlySpan - "otel/sdk/trace".ReadWriteSpan - ``` - -### Removed - -- Removed attempt to resample spans upon changing the span name with `span.SetName()`. (#1545) -- The `test-benchmark` is no longer a dependency of the `precommit` make target. (#1567) -- Removed the `test-386` make target. - This was replaced with a full compatibility testing suite (i.e. multi OS/arch) in the CI system. (#1567) - -### Fixed - -- The sequential timing check of timestamps in the stdout exporter are now setup explicitly to be sequential (#1571). (#1572) -- Windows build of Jaeger tests now compiles with OS specific functions (#1576). 
(#1577) -- The sequential timing check of timestamps of go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue are now setup explicitly to be sequential (#1578). (#1579) -- Validate tracestate header keys with vendors according to the W3C TraceContext specification (#1475). (#1581) -- The OTLP exporter includes related labels for translations of a GaugeArray (#1563). (#1570) - -## [0.17.0] - 2021-02-12 - -### Changed - -- Rename project default branch from `master` to `main`. (#1505) -- Reverse order in which `Resource` attributes are merged, per change in spec. (#1501) -- Add tooling to maintain "replace" directives in go.mod files automatically. (#1528) -- Create new modules: otel/metric, otel/trace, otel/oteltest, otel/sdk/export/metric, otel/sdk/metric (#1528) -- Move metric-related public global APIs from otel to otel/metric/global. (#1528) - -## Fixed - -- Fixed otlpgrpc reconnection issue. -- The example code in the README.md of `go.opentelemetry.io/otel/exporters/otlp` is moved to a compiled example test and used the new `WithAddress` instead of `WithEndpoint`. (#1513) -- The otel-collector example now uses the default OTLP receiver port of the collector. - -## [0.16.0] - 2021-01-13 - -### Added - -- Add the `ReadOnlySpan` and `ReadWriteSpan` interfaces to provide better control for accessing span data. (#1360) -- `NewGRPCDriver` function returns a `ProtocolDriver` that maintains a single gRPC connection to the collector. (#1369) -- Added documentation about the project's versioning policy. (#1388) -- Added `NewSplitDriver` for OTLP exporter that allows sending traces and metrics to different endpoints. (#1418) -- Added codeql worfklow to GitHub Actions (#1428) -- Added Gosec workflow to GitHub Actions (#1429) -- Add new HTTP driver for OTLP exporter in `exporters/otlp/otlphttp`. Currently it only supports the binary protobuf payloads. (#1420) -- Add an OpenCensus exporter bridge. (#1444) - -### Changed - -- Rename `internal/testing` to `internal/internaltest`. (#1449) -- Rename `export.SpanData` to `export.SpanSnapshot` and use it only for exporting spans. (#1360) -- Store the parent's full `SpanContext` rather than just its span ID in the `span` struct. (#1360) -- Improve span duration accuracy. (#1360) -- Migrated CI/CD from CircleCI to GitHub Actions (#1382) -- Remove duplicate checkout from GitHub Actions workflow (#1407) -- Metric `array` aggregator renamed `exact` to match its `aggregation.Kind` (#1412) -- Metric `exact` aggregator includes per-point timestamps (#1412) -- Metric stdout exporter uses MinMaxSumCount aggregator for ValueRecorder instruments (#1412) -- `NewExporter` from `exporters/otlp` now takes a `ProtocolDriver` as a parameter. (#1369) -- Many OTLP Exporter options became gRPC ProtocolDriver options. (#1369) -- Unify endpoint API that related to OTel exporter. (#1401) -- Optimize metric histogram aggregator to re-use its slice of buckets. (#1435) -- Metric aggregator Count() and histogram Bucket.Counts are consistently `uint64`. (1430) -- Histogram aggregator accepts functional options, uses default boundaries if none given. (#1434) -- `SamplingResult` now passed a `Tracestate` from the parent `SpanContext` (#1432) -- Moved gRPC driver for OTLP exporter to `exporters/otlp/otlpgrpc`. (#1420) -- The `TraceContext` propagator now correctly propagates `TraceState` through the `SpanContext`. 
(#1447) -- Metric Push and Pull Controller components are combined into a single "basic" Controller: - - `WithExporter()` and `Start()` to configure Push behavior - - `Start()` is optional; use `Collect()` and `ForEach()` for Pull behavior - - `Start()` and `Stop()` accept Context. (#1378) -- The `Event` type is moved from the `otel/sdk/export/trace` package to the `otel/trace` API package. (#1452) - -### Removed - -- Remove `errUninitializedSpan` as its only usage is now obsolete. (#1360) -- Remove Metric export functionality related to quantiles and summary data points: this is not specified (#1412) -- Remove DDSketch metric aggregator; our intention is to re-introduce this as an option of the histogram aggregator after [new OTLP histogram data types](https://github.com/open-telemetry/opentelemetry-proto/pull/226) are released (#1412) - -### Fixed - -- `BatchSpanProcessor.Shutdown()` will now shutdown underlying `export.SpanExporter`. (#1443) - -## [0.15.0] - 2020-12-10 - -### Added - -- The `WithIDGenerator` `TracerProviderOption` is added to the `go.opentelemetry.io/otel/trace` package to configure an `IDGenerator` for the `TracerProvider`. (#1363) - -### Changed - -- The Zipkin exporter now uses the Span status code to determine. (#1328) -- `NewExporter` and `Start` functions in `go.opentelemetry.io/otel/exporters/otlp` now receive `context.Context` as a first parameter. (#1357) -- Move the OpenCensus example into `example` directory. (#1359) -- Moved the SDK's `internal.IDGenerator` interface in to the `sdk/trace` package to enable support for externally-defined ID generators. (#1363) -- Bump `github.com/google/go-cmp` from 0.5.3 to 0.5.4 (#1374) -- Bump `github.com/golangci/golangci-lint` in `/internal/tools` (#1375) - -### Fixed - -- Metric SDK `SumObserver` and `UpDownSumObserver` instruments correctness fixes. (#1381) - -## [0.14.0] - 2020-11-19 - -### Added - -- An `EventOption` and the related `NewEventConfig` function are added to the `go.opentelemetry.io/otel` package to configure Span events. (#1254) -- A `TextMapPropagator` and associated `TextMapCarrier` are added to the `go.opentelemetry.io/otel/oteltest` package to test `TextMap` type propagators and their use. (#1259) -- `SpanContextFromContext` returns `SpanContext` from context. (#1255) -- `TraceState` has been added to `SpanContext`. (#1340) -- `DeploymentEnvironmentKey` added to `go.opentelemetry.io/otel/semconv` package. (#1323) -- Add an OpenCensus to OpenTelemetry tracing bridge. (#1305) -- Add a parent context argument to `SpanProcessor.OnStart` to follow the specification. (#1333) -- Add missing tests for `sdk/trace/attributes_map.go`. (#1337) - -### Changed - -- Move the `go.opentelemetry.io/otel/api/trace` package into `go.opentelemetry.io/otel/trace` with the following changes. (#1229) (#1307) - - `ID` has been renamed to `TraceID`. - - `IDFromHex` has been renamed to `TraceIDFromHex`. - - `EmptySpanContext` is removed. -- Move the `go.opentelemetry.io/otel/api/trace/tracetest` package into `go.opentelemetry.io/otel/oteltest`. (#1229) -- OTLP Exporter updates: - - supports OTLP v0.6.0 (#1230, #1354) - - supports configurable aggregation temporality (default: Cumulative, optional: Stateless). (#1296) -- The Sampler is now called on local child spans. (#1233) -- The `Kind` type from the `go.opentelemetry.io/otel/api/metric` package was renamed to `InstrumentKind` to more specifically describe what it is and avoid semantic ambiguity. 
(#1240) -- The `MetricKind` method of the `Descriptor` type in the `go.opentelemetry.io/otel/api/metric` package was renamed to `Descriptor.InstrumentKind`. - This matches the returned type and fixes misuse of the term metric. (#1240) -- Move test harness from the `go.opentelemetry.io/otel/api/apitest` package into `go.opentelemetry.io/otel/oteltest`. (#1241) -- Move the `go.opentelemetry.io/otel/api/metric/metrictest` package into `go.opentelemetry.io/oteltest` as part of #964. (#1252) -- Move the `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric` as part of #1303. (#1321) -- Move the `go.opentelemetry.io/otel/api/metric/registry` package into `go.opentelemetry.io/otel/metric/registry` as a part of #1303. (#1316) -- Move the `Number` type (together with related functions) from `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric/number` as a part of #1303. (#1316) -- The function signature of the Span `AddEvent` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required name and a variable number of `EventOption`s. (#1254) -- The function signature of the Span `RecordError` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required error value and a variable number of `EventOption`s. (#1254) -- Move the `go.opentelemetry.io/otel/api/global` package to `go.opentelemetry.io/otel`. (#1262) (#1330) -- Move the `Version` function from `go.opentelemetry.io/otel/sdk` to `go.opentelemetry.io/otel`. (#1330) -- Rename correlation context header from `"otcorrelations"` to `"baggage"` to match the OpenTelemetry specification. (#1267) -- Fix `Code.UnmarshalJSON` to work with valid JSON only. (#1276) -- The `resource.New()` method changes signature to support builtin attributes and functional options, including `telemetry.sdk.*` and - `host.name` semantic conventions; the former method is renamed `resource.NewWithAttributes`. (#1235) -- The Prometheus exporter now exports non-monotonic counters (i.e. `UpDownCounter`s) as gauges. (#1210) -- Correct the `Span.End` method documentation in the `otel` API to state updates are not allowed on a span after it has ended. (#1310) -- Updated span collection limits for attribute, event and link counts to 1000 (#1318) -- Renamed `semconv.HTTPUrlKey` to `semconv.HTTPURLKey`. (#1338) - -### Removed - -- The `ErrInvalidHexID`, `ErrInvalidTraceIDLength`, `ErrInvalidSpanIDLength`, `ErrInvalidSpanIDLength`, or `ErrNilSpanID` from the `go.opentelemetry.io/otel` package are unexported now. (#1243) -- The `AddEventWithTimestamp` method on the `Span` interface in `go.opentelemetry.io/otel` is removed due to its redundancy. - It is replaced by using the `AddEvent` method with a `WithTimestamp` option. (#1254) -- The `MockSpan` and `MockTracer` types are removed from `go.opentelemetry.io/otel/oteltest`. - `Tracer` and `Span` from the same module should be used in their place instead. (#1306) -- `WorkerCount` option is removed from `go.opentelemetry.io/otel/exporters/otlp`. (#1350) -- Remove the following labels types: INT32, UINT32, UINT64 and FLOAT32. (#1314) - -### Fixed - -- Rename `MergeItererator` to `MergeIterator` in the `go.opentelemetry.io/otel/label` package. (#1244) -- The `go.opentelemetry.io/otel/api/global` packages global TextMapPropagator now delegates functionality to a globally set delegate for all previously returned propagators. (#1258) -- Fix condition in `label.Any`. 
(#1299) -- Fix global `TracerProvider` to pass options to its configured provider. (#1329) -- Fix missing handler for `ExactKind` aggregator in OTLP metrics transformer (#1309) - -## [0.13.0] - 2020-10-08 - -### Added - -- OTLP Metric exporter supports Histogram aggregation. (#1209) -- The `Code` struct from the `go.opentelemetry.io/otel/codes` package now supports JSON marshaling and unmarshaling as well as implements the `Stringer` interface. (#1214) -- A Baggage API to implement the OpenTelemetry specification. (#1217) -- Add Shutdown method to sdk/trace/provider, shutdown processors in the order they were registered. (#1227) - -### Changed - -- Set default propagator to no-op propagator. (#1184) -- The `HTTPSupplier`, `HTTPExtractor`, `HTTPInjector`, and `HTTPPropagator` from the `go.opentelemetry.io/otel/api/propagation` package were replaced with unified `TextMapCarrier` and `TextMapPropagator` in the `go.opentelemetry.io/otel/propagation` package. (#1212) (#1325) -- The `New` function from the `go.opentelemetry.io/otel/api/propagation` package was replaced with `NewCompositeTextMapPropagator` in the `go.opentelemetry.io/otel` package. (#1212) -- The status codes of the `go.opentelemetry.io/otel/codes` package have been updated to match the latest OpenTelemetry specification. - They now are `Unset`, `Error`, and `Ok`. - They no longer track the gRPC codes. (#1214) -- The `StatusCode` field of the `SpanData` struct in the `go.opentelemetry.io/otel/sdk/export/trace` package now uses the codes package from this package instead of the gRPC project. (#1214) -- Move the `go.opentelemetry.io/otel/api/baggage` package into `go.opentelemetry.io/otel/baggage`. (#1217) (#1325) -- A `Shutdown` method of `SpanProcessor` and all its implementations receives a context and returns an error. (#1264) - -### Fixed - -- Copies of data from arrays and slices passed to `go.opentelemetry.io/otel/label.ArrayValue()` are now used in the returned `Value` instead of using the mutable data itself. (#1226) - -### Removed - -- The `ExtractHTTP` and `InjectHTTP` functions from the `go.opentelemetry.io/otel/api/propagation` package were removed. (#1212) -- The `Propagators` interface from the `go.opentelemetry.io/otel/api/propagation` package was removed to conform to the OpenTelemetry specification. - The explicit `TextMapPropagator` type can be used in its place as this is the `Propagator` type the specification defines. (#1212) -- The `SetAttribute` method of the `Span` from the `go.opentelemetry.io/otel/api/trace` package was removed given its redundancy with the `SetAttributes` method. (#1216) -- The internal implementation of Baggage storage is removed in favor of using the new Baggage API functionality. (#1217) -- Remove duplicate hostname key `HostHostNameKey` in Resource semantic conventions. (#1219) -- Nested array/slice support has been removed. (#1226) - -## [0.12.0] - 2020-09-24 - -### Added - -- A `SpanConfigure` function in `go.opentelemetry.io/otel/api/trace` to create a new `SpanConfig` from `SpanOption`s. (#1108) -- In the `go.opentelemetry.io/otel/api/trace` package, `NewTracerConfig` was added to construct new `TracerConfig`s. - This addition was made to conform with our project option conventions. (#1155) -- Instrumentation library information was added to the Zipkin exporter. (#1119) -- The `SpanProcessor` interface now has a `ForceFlush()` method. (#1166) -- More semantic conventions for k8s as resource attributes. 
(#1167) - -### Changed - -- Add reconnecting udp connection type to Jaeger exporter. - This change adds a new optional implementation of the udp conn interface used to detect changes to an agent's host dns record. - It then adopts the new destination address to ensure the exporter doesn't get stuck. This change was ported from jaegertracing/jaeger-client-go#520. (#1063) -- Replace `StartOption` and `EndOption` in `go.opentelemetry.io/otel/api/trace` with `SpanOption`. - This change is matched by replacing the `StartConfig` and `EndConfig` with a unified `SpanConfig`. (#1108) -- Replace the `LinkedTo` span option in `go.opentelemetry.io/otel/api/trace` with `WithLinks`. - This is be more consistent with our other option patterns, i.e. passing the item to be configured directly instead of its component parts, and provides a cleaner function signature. (#1108) -- The `go.opentelemetry.io/otel/api/trace` `TracerOption` was changed to an interface to conform to project option conventions. (#1109) -- Move the `B3` and `TraceContext` from within the `go.opentelemetry.io/otel/api/trace` package to their own `go.opentelemetry.io/otel/propagators` package. - This removal of the propagators is reflective of the OpenTelemetry specification for these propagators as well as cleans up the `go.opentelemetry.io/otel/api/trace` API. (#1118) -- Rename Jaeger tags used for instrumentation library information to reflect changes in OpenTelemetry specification. (#1119) -- Rename `ProbabilitySampler` to `TraceIDRatioBased` and change semantics to ignore parent span sampling status. (#1115) -- Move `tools` package under `internal`. (#1141) -- Move `go.opentelemetry.io/otel/api/correlation` package to `go.opentelemetry.io/otel/api/baggage`. (#1142) - The `correlation.CorrelationContext` propagator has been renamed `baggage.Baggage`. Other exported functions and types are unchanged. -- Rename `ParentOrElse` sampler to `ParentBased` and allow setting samplers depending on parent span. (#1153) -- In the `go.opentelemetry.io/otel/api/trace` package, `SpanConfigure` was renamed to `NewSpanConfig`. (#1155) -- Change `dependabot.yml` to add a `Skip Changelog` label to dependabot-sourced PRs. (#1161) -- The [configuration style guide](https://github.com/open-telemetry/opentelemetry-go/blob/master/CONTRIBUTING.md#config) has been updated to - recommend the use of `newConfig()` instead of `configure()`. (#1163) -- The `otlp.Config` type has been unexported and changed to `otlp.config`, along with its initializer. (#1163) -- Ensure exported interface types include parameter names and update the - Style Guide to reflect this styling rule. (#1172) -- Don't consider unset environment variable for resource detection to be an error. (#1170) -- Rename `go.opentelemetry.io/otel/api/metric.ConfigureInstrument` to `NewInstrumentConfig` and - `go.opentelemetry.io/otel/api/metric.ConfigureMeter` to `NewMeterConfig`. -- ValueObserver instruments use LastValue aggregator by default. (#1165) -- OTLP Metric exporter supports LastValue aggregation. (#1165) -- Move the `go.opentelemetry.io/otel/api/unit` package to `go.opentelemetry.io/otel/unit`. (#1185) -- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190) -- Rename `NoopProvider` to `NoopMeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190) -- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/metrictest` package. 
(#1190) -- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190) -- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metri/registryc` package. (#1190) -- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190) -- Rename `NoopProvider` to `NoopTracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190) -- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190) -- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190) -- Rename `WrapperProvider` to `WrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190) -- Rename `NewWrapperProvider` to `NewWrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190) -- Rename `Provider` method of the pull controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/pull` package. (#1190) -- Rename `Provider` method of the push controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/push` package. (#1190) -- Rename `ProviderOptions` to `TracerProviderConfig` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190) -- Rename `ProviderOption` to `TracerProviderOption` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190) -- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190) -- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190) -- Renamed `SamplingDecision` values to comply with OpenTelemetry specification change. (#1192) -- Renamed Zipkin attribute names from `ot.status_code & ot.status_description` to `otel.status_code & otel.status_description`. (#1201) -- The default SDK now invokes registered `SpanProcessor`s in the order they were registered with the `TracerProvider`. (#1195) -- Add test of spans being processed by the `SpanProcessor`s in the order they were registered. (#1203) - -### Removed - -- Remove the B3 propagator from `go.opentelemetry.io/otel/propagators`. It is now located in the - `go.opentelemetry.io/contrib/propagators/` module. (#1191) -- Remove the semantic convention for HTTP status text, `HTTPStatusTextKey` from package `go.opentelemetry.io/otel/semconv`. (#1194) - -### Fixed - -- Zipkin example no longer mentions `ParentSampler`, corrected to `ParentBased`. (#1171) -- Fix missing shutdown processor in otel-collector example. (#1186) -- Fix missing shutdown processor in basic and namedtracer examples. (#1197) - -## [0.11.0] - 2020-08-24 - -### Added - -- Support for exporting array-valued attributes via OTLP. (#992) -- `Noop` and `InMemory` `SpanBatcher` implementations to help with testing integrations. (#994) -- Support for filtering metric label sets. (#1047) -- A dimensionality-reducing metric Processor. (#1057) -- Integration tests for more OTel Collector Attribute types. (#1062) -- A new `WithSpanProcessor` `ProviderOption` is added to the `go.opentelemetry.io/otel/sdk/trace` package to create a `Provider` and automatically register the `SpanProcessor`. (#1078) - -### Changed - -- Rename `sdk/metric/processor/test` to `sdk/metric/processor/processortest`. (#1049) -- Rename `sdk/metric/controller/test` to `sdk/metric/controller/controllertest`. (#1049) -- Rename `api/testharness` to `api/apitest`. 
(#1049) -- Rename `api/trace/testtrace` to `api/trace/tracetest`. (#1049) -- Change Metric Processor to merge multiple observations. (#1024) -- The `go.opentelemetry.io/otel/bridge/opentracing` bridge package has been made into its own module. - This removes the package dependencies of this bridge from the rest of the OpenTelemetry based project. (#1038) -- Renamed `go.opentelemetry.io/otel/api/standard` package to `go.opentelemetry.io/otel/semconv` to avoid the ambiguous and generic name `standard` and better describe the package as containing OpenTelemetry semantic conventions. (#1016) -- The environment variable used for resource detection has been changed from `OTEL_RESOURCE_LABELS` to `OTEL_RESOURCE_ATTRIBUTES` (#1042) -- Replace `WithSyncer` with `WithBatcher` in examples. (#1044) -- Replace the `google.golang.org/grpc/codes` dependency in the API with an equivalent `go.opentelemetry.io/otel/codes` package. (#1046) -- Merge the `go.opentelemetry.io/otel/api/label` and `go.opentelemetry.io/otel/api/kv` into the new `go.opentelemetry.io/otel/label` package. (#1060) -- Unify Callback Function Naming. - Rename `*Callback` with `*Func`. (#1061) -- CI builds validate against last two versions of Go, dropping 1.13 and adding 1.15. (#1064) -- The `go.opentelemetry.io/otel/sdk/export/trace` interfaces `SpanSyncer` and `SpanBatcher` have been replaced with a specification compliant `Exporter` interface. - This interface still supports the export of `SpanData`, but only as a slice. - Implementation are also required now to return any error from `ExportSpans` if one occurs as well as implement a `Shutdown` method for exporter clean-up. (#1078) -- The `go.opentelemetry.io/otel/sdk/trace` `NewBatchSpanProcessor` function no longer returns an error. - If a `nil` exporter is passed as an argument to this function, instead of it returning an error, it now returns a `BatchSpanProcessor` that handles the export of `SpanData` by not taking any action. (#1078) -- The `go.opentelemetry.io/otel/sdk/trace` `NewProvider` function to create a `Provider` no longer returns an error, instead only a `*Provider`. - This change is related to `NewBatchSpanProcessor` not returning an error which was the only error this function would return. (#1078) - -### Removed - -- Duplicate, unused API sampler interface. (#999) - Use the [`Sampler` interface](https://github.com/open-telemetry/opentelemetry-go/blob/v0.11.0/sdk/trace/sampling.go) provided by the SDK instead. -- The `grpctrace` instrumentation was moved to the `go.opentelemetry.io/contrib` repository and out of this repository. - This move includes moving the `grpc` example to the `go.opentelemetry.io/contrib` as well. (#1027) -- The `WithSpan` method of the `Tracer` interface. - The functionality this method provided was limited compared to what a user can provide themselves. - It was removed with the understanding that if there is sufficient user need it can be added back based on actual user usage. (#1043) -- The `RegisterSpanProcessor` and `UnregisterSpanProcessor` functions. - These were holdovers from an approach prior to the TracerProvider design. They were not used anymore. (#1077) -- The `oterror` package. (#1026) -- The `othttp` and `httptrace` instrumentations were moved to `go.opentelemetry.io/contrib`. (#1032) - -### Fixed - -- The `semconv.HTTPServerMetricAttributesFromHTTPRequest()` function no longer generates the high-cardinality `http.request.content.length` label. (#1031) -- Correct instrumentation version tag in Jaeger exporter. 
(#1037) -- The SDK span will now set an error event if the `End` method is called during a panic (i.e. it was deferred). (#1043) -- Move internally generated protobuf code from the `go.opentelemetry.io/otel` to the OTLP exporter to reduce dependency overhead. (#1050) -- The `otel-collector` example referenced outdated collector processors. (#1006) - -## [0.10.0] - 2020-07-29 - -This release migrates the default OpenTelemetry SDK into its own Go module, decoupling the SDK from the API and reducing dependencies for instrumentation packages. - -### Added - -- The Zipkin exporter now has `NewExportPipeline` and `InstallNewPipeline` constructor functions to match the common pattern. - These function build a new exporter with default SDK options and register the exporter with the `global` package respectively. (#944) -- Add propagator option for gRPC instrumentation. (#986) -- The `testtrace` package now tracks the `trace.SpanKind` for each span. (#987) - -### Changed - -- Replace the `RegisterGlobal` `Option` in the Jaeger exporter with an `InstallNewPipeline` constructor function. - This matches the other exporter constructor patterns and will register a new exporter after building it with default configuration. (#944) -- The trace (`go.opentelemetry.io/otel/exporters/trace/stdout`) and metric (`go.opentelemetry.io/otel/exporters/metric/stdout`) `stdout` exporters are now merged into a single exporter at `go.opentelemetry.io/otel/exporters/stdout`. - This new exporter was made into its own Go module to follow the pattern of all exporters and decouple it from the `go.opentelemetry.io/otel` module. (#956, #963) -- Move the `go.opentelemetry.io/otel/exporters/test` test package to `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#962) -- The `go.opentelemetry.io/otel/api/kv/value` package was merged into the parent `go.opentelemetry.io/otel/api/kv` package. (#968) - - `value.Bool` was replaced with `kv.BoolValue`. - - `value.Int64` was replaced with `kv.Int64Value`. - - `value.Uint64` was replaced with `kv.Uint64Value`. - - `value.Float64` was replaced with `kv.Float64Value`. - - `value.Int32` was replaced with `kv.Int32Value`. - - `value.Uint32` was replaced with `kv.Uint32Value`. - - `value.Float32` was replaced with `kv.Float32Value`. - - `value.String` was replaced with `kv.StringValue`. - - `value.Int` was replaced with `kv.IntValue`. - - `value.Uint` was replaced with `kv.UintValue`. - - `value.Array` was replaced with `kv.ArrayValue`. -- Rename `Infer` to `Any` in the `go.opentelemetry.io/otel/api/kv` package. (#972) -- Change `othttp` to use the `httpsnoop` package to wrap the `ResponseWriter` so that optional interfaces (`http.Hijacker`, `http.Flusher`, etc.) that are implemented by the original `ResponseWriter`are also implemented by the wrapped `ResponseWriter`. (#979) -- Rename `go.opentelemetry.io/otel/sdk/metric/aggregator/test` package to `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest`. (#980) -- Make the SDK into its own Go module called `go.opentelemetry.io/otel/sdk`. (#985) -- Changed the default trace `Sampler` from `AlwaysOn` to `ParentOrElse(AlwaysOn)`. (#989) - -### Removed - -- The `IndexedAttribute` function from the `go.opentelemetry.io/otel/api/label` package was removed in favor of `IndexedLabel` which it was synonymous with. (#970) - -### Fixed - -- Bump github.com/golangci/golangci-lint from 1.28.3 to 1.29.0 in /tools. (#953) -- Bump github.com/google/go-cmp from 0.5.0 to 0.5.1. 
(#957) -- Use `global.Handle` for span export errors in the OTLP exporter. (#946) -- Correct Go language formatting in the README documentation. (#961) -- Remove default SDK dependencies from the `go.opentelemetry.io/otel/api` package. (#977) -- Remove default SDK dependencies from the `go.opentelemetry.io/otel/instrumentation` package. (#983) -- Move documented examples for `go.opentelemetry.io/otel/instrumentation/grpctrace` interceptors into Go example tests. (#984) - -## [0.9.0] - 2020-07-20 - -### Added - -- A new Resource Detector interface is included to allow resources to be automatically detected and included. (#939) -- A Detector to automatically detect resources from an environment variable. (#939) -- Github action to generate protobuf Go bindings locally in `internal/opentelemetry-proto-gen`. (#938) -- OTLP .proto files from `open-telemetry/opentelemetry-proto` imported as a git submodule under `internal/opentelemetry-proto`. - References to `github.com/open-telemetry/opentelemetry-proto` changed to `go.opentelemetry.io/otel/internal/opentelemetry-proto-gen`. (#942) - -### Changed - -- Non-nil value `struct`s for key-value pairs will be marshalled using JSON rather than `Sprintf`. (#948) - -### Removed - -- Removed dependency on `github.com/open-telemetry/opentelemetry-collector`. (#943) - -## [0.8.0] - 2020-07-09 - -### Added - -- The `B3Encoding` type to represent the B3 encoding(s) the B3 propagator can inject. - A value for HTTP supported encodings (Multiple Header: `MultipleHeader`, Single Header: `SingleHeader`) are included. (#882) -- The `FlagsDeferred` trace flag to indicate if the trace sampling decision has been deferred. (#882) -- The `FlagsDebug` trace flag to indicate if the trace is a debug trace. (#882) -- Add `peer.service` semantic attribute. (#898) -- Add database-specific semantic attributes. (#899) -- Add semantic convention for `faas.coldstart` and `container.id`. (#909) -- Add http content size semantic conventions. (#905) -- Include `http.request_content_length` in HTTP request basic attributes. (#905) -- Add semantic conventions for operating system process resource attribute keys. (#919) -- The Jaeger exporter now has a `WithBatchMaxCount` option to specify the maximum number of spans sent in a batch. (#931) - -### Changed - -- Update `CONTRIBUTING.md` to ask for updates to `CHANGELOG.md` with each pull request. (#879) -- Use lowercase header names for B3 Multiple Headers. (#881) -- The B3 propagator `SingleHeader` field has been replaced with `InjectEncoding`. - This new field can be set to combinations of the `B3Encoding` bitmasks and will inject trace information in these encodings. - If no encoding is set, the propagator will default to `MultipleHeader` encoding. (#882) -- The B3 propagator now extracts from either HTTP encoding of B3 (Single Header or Multiple Header) based on what is contained in the header. - Preference is given to Single Header encoding with Multiple Header being the fallback if Single Header is not found or is invalid. - This behavior change is made to dynamically support all correctly encoded traces received instead of having to guess the expected encoding prior to receiving. (#882) -- Extend semantic conventions for RPC. (#900) -- To match constant naming conventions in the `api/standard` package, the `FaaS*` key names are appended with a suffix of `Key`. 
(#920) - - `"api/standard".FaaSName` -> `FaaSNameKey` - - `"api/standard".FaaSID` -> `FaaSIDKey` - - `"api/standard".FaaSVersion` -> `FaaSVersionKey` - - `"api/standard".FaaSInstance` -> `FaaSInstanceKey` - -### Removed - -- The `FlagsUnused` trace flag is removed. - The purpose of this flag was to act as the inverse of `FlagsSampled`, the inverse of `FlagsSampled` is used instead. (#882) -- The B3 header constants (`B3SingleHeader`, `B3DebugFlagHeader`, `B3TraceIDHeader`, `B3SpanIDHeader`, `B3SampledHeader`, `B3ParentSpanIDHeader`) are removed. - If B3 header keys are needed [the authoritative OpenZipkin package constants](https://pkg.go.dev/github.com/openzipkin/zipkin-go@v0.2.2/propagation/b3?tab=doc#pkg-constants) should be used instead. (#882) - -### Fixed - -- The B3 Single Header name is now correctly `b3` instead of the previous `X-B3`. (#881) -- The B3 propagator now correctly supports sampling only values (`b3: 0`, `b3: 1`, or `b3: d`) for a Single B3 Header. (#882) -- The B3 propagator now propagates the debug flag. - This removes the behavior of changing the debug flag into a set sampling bit. - Instead, this now follow the B3 specification and omits the `X-B3-Sampling` header. (#882) -- The B3 propagator now tracks "unset" sampling state (meaning "defer the decision") and does not set the `X-B3-Sampling` header when injecting. (#882) -- Bump github.com/itchyny/gojq from 0.10.3 to 0.10.4 in /tools. (#883) -- Bump github.com/opentracing/opentracing-go from v1.1.1-0.20190913142402-a7454ce5950e to v1.2.0. (#885) -- The tracing time conversion for OTLP spans is now correctly set to `UnixNano`. (#896) -- Ensure span status is not set to `Unknown` when no HTTP status code is provided as it is assumed to be `200 OK`. (#908) -- Ensure `httptrace.clientTracer` closes `http.headers` span. (#912) -- Prometheus exporter will not apply stale updates or forget inactive metrics. (#903) -- Add test for api.standard `HTTPClientAttributesFromHTTPRequest`. (#905) -- Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.1 in /tools. (#901, #913) -- Update otel-colector example to use the v0.5.0 collector. (#915) -- The `grpctrace` instrumentation uses a span name conforming to the OpenTelemetry semantic conventions (does not contain a leading slash (`/`)). (#922) -- The `grpctrace` instrumentation includes an `rpc.method` attribute now set to the gRPC method name. (#900, #922) -- The `grpctrace` instrumentation `rpc.service` attribute now contains the package name if one exists. - This is in accordance with OpenTelemetry semantic conventions. (#922) -- Correlation Context extractor will no longer insert an empty map into the returned context when no valid values are extracted. (#923) -- Bump google.golang.org/api from 0.28.0 to 0.29.0 in /exporters/trace/jaeger. (#925) -- Bump github.com/itchyny/gojq from 0.10.4 to 0.11.0 in /tools. (#926) -- Bump github.com/golangci/golangci-lint from 1.28.1 to 1.28.2 in /tools. (#930) - -## [0.7.0] - 2020-06-26 - -This release implements the v0.5.0 version of the OpenTelemetry specification. - -### Added - -- The othttp instrumentation now includes default metrics. (#861) -- This CHANGELOG file to track all changes in the project going forward. -- Support for array type attributes. (#798) -- Apply transitive dependabot go.mod dependency updates as part of a new automatic Github workflow. (#844) -- Timestamps are now passed to exporters for each export. 
(#835) -- Add new `Accumulation` type to metric SDK to transport telemetry from `Accumulator`s to `Processor`s. - This replaces the prior `Record` `struct` use for this purpose. (#835) -- New dependabot integration to automate package upgrades. (#814) -- `Meter` and `Tracer` implementations accept instrumentation version version as an optional argument. - This instrumentation version is passed on to exporters. (#811) (#805) (#802) -- The OTLP exporter includes the instrumentation version in telemetry it exports. (#811) -- Environment variables for Jaeger exporter are supported. (#796) -- New `aggregation.Kind` in the export metric API. (#808) -- New example that uses OTLP and the collector. (#790) -- Handle errors in the span `SetName` during span initialization. (#791) -- Default service config to enable retries for retry-able failed requests in the OTLP exporter and an option to override this default. (#777) -- New `go.opentelemetry.io/otel/api/oterror` package to uniformly support error handling and definitions for the project. (#778) -- New `global` default implementation of the `go.opentelemetry.io/otel/api/oterror.Handler` interface to be used to handle errors prior to an user defined `Handler`. - There is also functionality for the user to register their `Handler` as well as a convenience function `Handle` to handle an error with this global `Handler`(#778) -- Options to specify propagators for httptrace and grpctrace instrumentation. (#784) -- The required `application/json` header for the Zipkin exporter is included in all exports. (#774) -- Integrate HTTP semantics helpers from the contrib repository into the `api/standard` package. #769 - -### Changed - -- Rename `Integrator` to `Processor` in the metric SDK. (#863) -- Rename `AggregationSelector` to `AggregatorSelector`. (#859) -- Rename `SynchronizedCopy` to `SynchronizedMove`. (#858) -- Rename `simple` integrator to `basic` integrator. (#857) -- Merge otlp collector examples. (#841) -- Change the metric SDK to support cumulative, delta, and pass-through exporters directly. - With these changes, cumulative and delta specific exporters are able to request the correct kind of aggregation from the SDK. (#840) -- The `Aggregator.Checkpoint` API is renamed to `SynchronizedCopy` and adds an argument, a different `Aggregator` into which the copy is stored. (#812) -- The `export.Aggregator` contract is that `Update()` and `SynchronizedCopy()` are synchronized with each other. - All the aggregation interfaces (`Sum`, `LastValue`, ...) are not meant to be synchronized, as the caller is expected to synchronize aggregators at a higher level after the `Accumulator`. - Some of the `Aggregators` used unnecessary locking and that has been cleaned up. (#812) -- Use of `metric.Number` was replaced by `int64` now that we use `sync.Mutex` in the `MinMaxSumCount` and `Histogram` `Aggregators`. (#812) -- Replace `AlwaysParentSample` with `ParentSample(fallback)` to match the OpenTelemetry v0.5.0 specification. (#810) -- Rename `sdk/export/metric/aggregator` to `sdk/export/metric/aggregation`. #808 -- Send configured headers with every request in the OTLP exporter, instead of just on connection creation. (#806) -- Update error handling for any one off error handlers, replacing, instead, with the `global.Handle` function. (#791) -- Rename `plugin` directory to `instrumentation` to match the OpenTelemetry specification. (#779) -- Makes the argument order to Histogram and DDSketch `New()` consistent. 
(#781) - -### Removed - -- `Uint64NumberKind` and related functions from the API. (#864) -- Context arguments from `Aggregator.Checkpoint` and `Integrator.Process` as they were unused. (#803) -- `SpanID` is no longer included in parameters for sampling decision to match the OpenTelemetry specification. (#775) - -### Fixed - -- Upgrade OTLP exporter to opentelemetry-proto matching the opentelemetry-collector v0.4.0 release. (#866) -- Allow changes to `go.sum` and `go.mod` when running dependabot tidy-up. (#871) -- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1. (#824) -- Bump github.com/prometheus/client_golang from 1.7.0 to 1.7.1 in /exporters/metric/prometheus. (#867) -- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/jaeger. (#853) -- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/zipkin. (#854) -- Bumps github.com/golang/protobuf from 1.3.2 to 1.4.2 (#848) -- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/otlp (#817) -- Bump github.com/golangci/golangci-lint from 1.25.1 to 1.27.0 in /tools (#828) -- Bump github.com/prometheus/client_golang from 1.5.0 to 1.7.0 in /exporters/metric/prometheus (#838) -- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/jaeger (#829) -- Bump github.com/benbjohnson/clock from 1.0.0 to 1.0.3 (#815) -- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/zipkin (#823) -- Bump github.com/itchyny/gojq from 0.10.1 to 0.10.3 in /tools (#830) -- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/metric/prometheus (#822) -- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/zipkin (#820) -- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/jaeger (#831) -- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 (#836) -- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/trace/jaeger (#837) -- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/otlp (#839) -- Bump google.golang.org/api from 0.20.0 to 0.28.0 in /exporters/trace/jaeger (#843) -- Set span status from HTTP status code in the othttp instrumentation. (#832) -- Fixed typo in push controller comment. (#834) -- The `Aggregator` testing has been updated and cleaned. (#812) -- `metric.Number(0)` expressions are replaced by `0` where possible. (#812) -- Fixed `global` `handler_test.go` test failure. #804 -- Fixed `BatchSpanProcessor.Shutdown` to wait until all spans are processed. (#766) -- Fixed OTLP example's accidental early close of exporter. (#807) -- Ensure zipkin exporter reads and closes response body. (#788) -- Update instrumentation to use `api/standard` keys instead of custom keys. (#782) -- Clean up tools and RELEASING documentation. (#762) - -## [0.6.0] - 2020-05-21 - -### Added - -- Support for `Resource`s in the prometheus exporter. (#757) -- New pull controller. (#751) -- New `UpDownSumObserver` instrument. (#750) -- OpenTelemetry collector demo. (#711) -- New `SumObserver` instrument. (#747) -- New `UpDownCounter` instrument. (#745) -- New timeout `Option` and configuration function `WithTimeout` to the push controller. (#742) -- New `api/standards` package to implement semantic conventions and standard key-value generation. (#731) - -### Changed - -- Rename `Register*` functions in the metric API to `New*` for all `Observer` instruments. (#761) -- Use `[]float64` for histogram boundaries, not `[]metric.Number`. 
(#758) -- Change OTLP example to use exporter as a trace `Syncer` instead of as an unneeded `Batcher`. (#756) -- Replace `WithResourceAttributes()` with `WithResource()` in the trace SDK. (#754) -- The prometheus exporter now uses the new pull controller. (#751) -- Rename `ScheduleDelayMillis` to `BatchTimeout` in the trace `BatchSpanProcessor`.(#752) -- Support use of synchronous instruments in asynchronous callbacks (#725) -- Move `Resource` from the `Export` method parameter into the metric export `Record`. (#739) -- Rename `Observer` instrument to `ValueObserver`. (#734) -- The push controller now has a method (`Provider()`) to return a `metric.Provider` instead of the old `Meter` method that acted as a `metric.Provider`. (#738) -- Replace `Measure` instrument by `ValueRecorder` instrument. (#732) -- Rename correlation context header from `"Correlation-Context"` to `"otcorrelations"` to match the OpenTelemetry specification. (#727) - -### Fixed - -- Ensure gRPC `ClientStream` override methods do not panic in grpctrace package. (#755) -- Disable parts of `BatchSpanProcessor` test until a fix is found. (#743) -- Fix `string` case in `kv` `Infer` function. (#746) -- Fix panic in grpctrace client interceptors. (#740) -- Refactor the `api/metrics` push controller and add `CheckpointSet` synchronization. (#737) -- Rewrite span batch process queue batching logic. (#719) -- Remove the push controller named Meter map. (#738) -- Fix Histogram aggregator initial state (fix #735). (#736) -- Ensure golang alpine image is running `golang-1.14` for examples. (#733) -- Added test for grpctrace `UnaryInterceptorClient`. (#695) -- Rearrange `api/metric` code layout. (#724) - -## [0.5.0] - 2020-05-13 - -### Added - -- Batch `Observer` callback support. (#717) -- Alias `api` types to root package of project. (#696) -- Create basic `othttp.Transport` for simple client instrumentation. (#678) -- `SetAttribute(string, interface{})` to the trace API. (#674) -- Jaeger exporter option that allows user to specify custom http client. (#671) -- `Stringer` and `Infer` methods to `key`s. (#662) - -### Changed - -- Rename `NewKey` in the `kv` package to just `Key`. (#721) -- Move `core` and `key` to `kv` package. (#720) -- Make the metric API `Meter` a `struct` so the abstract `MeterImpl` can be passed and simplify implementation. (#709) -- Rename SDK `Batcher` to `Integrator` to match draft OpenTelemetry SDK specification. (#710) -- Rename SDK `Ungrouped` integrator to `simple.Integrator` to match draft OpenTelemetry SDK specification. (#710) -- Rename SDK `SDK` `struct` to `Accumulator` to match draft OpenTelemetry SDK specification. (#710) -- Move `Number` from `core` to `api/metric` package. (#706) -- Move `SpanContext` from `core` to `trace` package. (#692) -- Change traceparent header from `Traceparent` to `traceparent` to implement the W3C specification. (#681) - -### Fixed - -- Update tooling to run generators in all submodules. (#705) -- gRPC interceptor regexp to match methods without a service name. (#683) -- Use a `const` for padding 64-bit B3 trace IDs. (#701) -- Update `mockZipkin` listen address from `:0` to `127.0.0.1:0`. (#700) -- Left-pad 64-bit B3 trace IDs with zero. (#698) -- Propagate at least the first W3C tracestate header. (#694) -- Remove internal `StateLocker` implementation. (#688) -- Increase instance size CI system uses. (#690) -- Add a `key` benchmark and use reflection in `key.Infer()`. (#679) -- Fix internal `global` test by using `global.Meter` with `RecordBatch()`. 
(#680) -- Reimplement histogram using mutex instead of `StateLocker`. (#669) -- Switch `MinMaxSumCount` to a mutex lock implementation instead of `StateLocker`. (#667) -- Update documentation to not include any references to `WithKeys`. (#672) -- Correct misspelling. (#668) -- Fix clobbering of the span context if extraction fails. (#656) -- Bump `golangci-lint` and work around the corrupting bug. (#666) (#670) - -## [0.4.3] - 2020-04-24 - -### Added - -- `Dockerfile` and `docker-compose.yml` to run example code. (#635) -- New `grpctrace` package that provides gRPC client and server interceptors for both unary and stream connections. (#621) -- New `api/label` package, providing common label set implementation. (#651) -- Support for JSON marshaling of `Resources`. (#654) -- `TraceID` and `SpanID` implementations for `Stringer` interface. (#642) -- `RemoteAddrKey` in the othttp plugin to include the HTTP client address in top-level spans. (#627) -- `WithSpanFormatter` option to the othttp plugin. (#617) -- Updated README to include section for compatible libraries and include reference to the contrib repository. (#612) -- The prometheus exporter now supports exporting histograms. (#601) -- A `String` method to the `Resource` to return a hashable identifier for a now unique resource. (#613) -- An `Iter` method to the `Resource` to return an array `AttributeIterator`. (#613) -- An `Equal` method to the `Resource` to test the equivalence of resources. (#613) -- An iterable structure (`AttributeIterator`) for `Resource` attributes. - -### Changed - -- The zipkin exporter's `NewExporter` now requires a `serviceName` argument to ensure this needed value is provided. (#644) -- Pass `Resources` through the metrics export pipeline. (#659) - -### Removed - -- `WithKeys` option from the metric API. (#639) - -### Fixed - -- Use the `label.Set.Equivalent` value instead of an encoding in the batcher. (#658) -- Correct typo `trace.Exporter` to `trace.SpanSyncer` in comments. (#653) -- Use type names for return values in jaeger exporter. (#648) -- Increase the visibility of the `api/key` package by updating comments and fixing usages locally. (#650) -- `Checkpoint` only after `Update`; Keep records in the `sync.Map` longer. (#647) -- Do not cache `reflect.ValueOf()` in metric Labels. (#649) -- Batch metrics exported from the OTLP exporter based on `Resource` and labels. (#626) -- Add error wrapping to the prometheus exporter. (#631) -- Update the OTLP exporter batching of traces to use a unique `string` representation of an associated `Resource` as the batching key. (#623) -- Update OTLP `SpanData` transform to only include the `ParentSpanID` if one exists. (#614) -- Update `Resource` internal representation to uniquely and reliably identify resources. (#613) -- Check return value from `CheckpointSet.ForEach` in prometheus exporter. (#622) -- Ensure spans created by httptrace client tracer reflect operation structure. (#618) -- Create a new recorder rather than reuse when multiple observations in same epoch for asynchronous instruments. #610 -- The default port the OTLP exporter uses to connect to the OpenTelemetry collector is updated to match the one the collector listens on by default. (#611) - -## [0.4.2] - 2020-03-31 - -### Fixed - -- Fix `pre_release.sh` to update version in `sdk/opentelemetry.go`. (#607) -- Fix time conversion from internal to OTLP in OTLP exporter. (#606) - -## [0.4.1] - 2020-03-31 - -### Fixed - -- Update `tag.sh` to create signed tags.
(#604) - -## [0.4.0] - 2020-03-30 - -### Added - -- New API package `api/metric/registry` that exposes a `MeterImpl` wrapper for use by SDKs to generate unique instruments. (#580) -- Script to verify examples after a new release. (#579) - -### Removed - -- The dogstatsd exporter due to lack of support. - This additionally removes support for statsd. (#591) -- `LabelSet` from the metric API. - This is replaced by a `[]core.KeyValue` slice. (#595) -- `Labels` from the metric API's `Meter` interface. (#595) - -### Changed - -- The metric `export.Labels` became an interface which the SDK implements and the `export` package provides a simple, immutable implementation of this interface intended for testing purposes. (#574) -- Renamed `internal/metric.Meter` to `MeterImpl`. (#580) -- Renamed `api/global/internal.obsImpl` to `asyncImpl`. (#580) - -### Fixed - -- Corrected missing return in mock span. (#582) -- Update License header for all source files to match CNCF guidelines and include a test to ensure it is present. (#586) (#596) -- Update to v0.3.0 of the OTLP in the OTLP exporter. (#588) -- Update pre-release script to be compatible between GNU and BSD based systems. (#592) -- Add a `RecordBatch` benchmark. (#594) -- Moved span transforms of the OTLP exporter to the internal package. (#593) -- Build both go-1.13 and go-1.14 in circleci to test for all supported versions of Go. (#569) -- Removed unneeded allocation on empty labels in the OTLP exporter. (#597) -- Update `BatchedSpanProcessor` to process the queue until no data but respect max batch size. (#599) -- Update project documentation godoc.org links to pkg.go.dev. (#602) - -## [0.3.0] - 2020-03-21 - -This is the first official beta release, which provides almost fully complete metrics, tracing, and context propagation functionality. -There is still a possibility of breaking changes. - -### Added - -- Add `Observer` metric instrument. (#474) -- Add global `Propagators` functionality to enable deferred initialization for propagators registered before the first Meter SDK is installed. (#494) -- Simplified export setup pipeline for the jaeger exporter to match other exporters. (#459) -- The zipkin trace exporter. (#495) -- The OTLP exporter to export metric and trace telemetry to the OpenTelemetry collector. (#497) (#544) (#545) -- Add `StatusMessage` field to the trace `Span`. (#524) -- Context propagation in OpenTracing bridge in terms of OpenTelemetry context propagation. (#525) -- The `Resource` type was added to the SDK. (#528) -- The global API now supports a `Tracer` and `Meter` function as shortcuts to getting a global `*Provider` and calling these methods directly. (#538) -- The metric API now defines a generic `MeterImpl` interface to support general purpose `Meter` construction. - Additionally, `SyncImpl` and `AsyncImpl` are added to support general purpose instrument construction. (#560) -- A metric `Kind` is added to represent the `MeasureKind`, `ObserverKind`, and `CounterKind`. (#560) -- Scripts to better automate the release process. (#576) - -### Changed - -- Default to using `AlwaysSampler` instead of `ProbabilitySampler` to match OpenTelemetry specification. (#506) -- Renamed `AlwaysSampleSampler` to `AlwaysOnSampler` in the trace API. (#511) -- Renamed `NeverSampleSampler` to `AlwaysOffSampler` in the trace API. (#511) -- The `Status` field of the `Span` was changed to `StatusCode` to disambiguate with the added `StatusMessage`. (#524) -- Updated the trace `Sampler` interface to conform to the OpenTelemetry specification.
(#531) -- Rename metric API `Options` to `Config`. (#541) -- Rename metric `Counter` aggregator to be `Sum`. (#541) -- Unify metric options into `Option` from instrument specific options. (#541) -- The trace API's `TraceProvider` now supports `Resource`s. (#545) -- Correct error in zipkin module name. (#548) -- The jaeger trace exporter now supports `Resource`s. (#551) -- Metric SDK now supports `Resource`s. - The `WithResource` option was added to configure a `Resource` on creation and the `Resource` method was added to the metric `Descriptor` to return the associated `Resource`. (#552) -- Replace `ErrNoLastValue` and `ErrEmptyDataSet` by `ErrNoData` in the metric SDK. (#557) -- The stdout trace exporter now supports `Resource`s. (#558) -- The metric `Descriptor` is now included at the API instead of the SDK. (#560) -- Replace `Ordered` with an iterator in `export.Labels`. (#567) - -### Removed - -- The vendor specific Stackdriver. It is now hosted on 3rd party vendor infrastructure. (#452) -- The `Unregister` method for metric observers as it is not in the OpenTelemetry specification. (#560) -- `GetDescriptor` from the metric SDK. (#575) -- The `Gauge` instrument from the metric API. (#537) - -### Fixed - -- Make histogram aggregator checkpoint consistent. (#438) -- Update README with import instructions and how to build and test. (#505) -- The default label encoding was updated to be unique. (#508) -- Use `NewRoot` in the othttp plugin for public endpoints. (#513) -- Fix data race in `BatchedSpanProcessor`. (#518) -- Skip test-386 for Mac OS 10.15.x (Catalina and upwards). #521 -- Use a variable-size array to represent ordered labels in maps. (#523) -- Update the OTLP protobuf and update changed import path. (#532) -- Use `StateLocker` implementation in `MinMaxSumCount`. (#546) -- Eliminate goroutine leak in histogram stress test. (#547) -- Update OTLP exporter with latest protobuf. (#550) -- Add filters to the othttp plugin. (#556) -- Provide an implementation of the `Header*` filters that do not depend on Go 1.14. (#565) -- Encode labels once during checkpoint. - The checkpoint function is executed in a single thread so we can do the encoding lazily before passing the encoded version of labels to the exporter. - This is a cheap and quick way to avoid encoding the labels on every collection interval. (#572) -- Run coverage over all packages in `COVERAGE_MOD_DIR`. (#573) - -## [0.2.3] - 2020-03-04 - -### Added - -- `RecordError` method on `Span`s in the trace API to simplify adding error events to spans. (#473) -- Configurable push frequency for exporters setup pipeline. (#504) - -### Changed - -- Rename the `exporter` directory to `exporters`. - The `go.opentelemetry.io/otel/exporter/trace/jaeger` package was mistakenly released with a `v1.0.0` tag instead of `v0.1.0`. - This resulted in all subsequent releases not becoming the default latest. - A consequence of this was that all `go get`s pulled in the incompatible `v0.1.0` release of that package when pulling in more recent packages from other otel packages. - Renaming the `exporter` directory to `exporters` fixes this issue by renaming the package and therefore clearing any existing dependency tags. - Consequently, this action also renames *all* exporter packages. (#502) - -### Removed - -- The `CorrelationContextHeader` constant in the `correlation` package is no longer exported.
(#503) - -## [0.2.2] - 2020-02-27 - -### Added - -- `HTTPSupplier` interface in the propagation API to specify methods to retrieve and store a single value for a key to be associated with a carrier. (#467) -- `HTTPExtractor` interface in the propagation API to extract information from an `HTTPSupplier` into a context. (#467) -- `HTTPInjector` interface in the propagation API to inject information into an `HTTPSupplier.` (#467) -- `Config` and configuring `Option` to the propagator API. (#467) -- `Propagators` interface in the propagation API to contain the set of injectors and extractors for all supported carrier formats. (#467) -- `HTTPPropagator` interface in the propagation API to inject and extract from an `HTTPSupplier.` (#467) -- `WithInjectors` and `WithExtractors` functions to the propagator API to configure injectors and extractors to use. (#467) -- `ExtractHTTP` and `InjectHTTP` functions to apply configured HTTP extractors and injectors to a passed context. (#467) -- Histogram aggregator. (#433) -- `DefaultPropagator` function and have it return `trace.TraceContext` as the default context propagator. (#456) -- `AlwaysParentSample` sampler to the trace API. (#455) -- `WithNewRoot` option function to the trace API to specify the created span should be considered a root span. (#451) - -### Changed - -- Renamed `WithMap` to `ContextWithMap` in the correlation package. (#481) -- Renamed `FromContext` to `MapFromContext` in the correlation package. (#481) -- Move correlation context propagation to correlation package. (#479) -- Do not default to putting remote span context into links. (#480) -- `Tracer.WithSpan` updated to accept `StartOptions`. (#472) -- Renamed `MetricKind` to `Kind` to not stutter in the type usage. (#432) -- Renamed the `export` package to `metric` to match directory structure. (#432) -- Rename the `api/distributedcontext` package to `api/correlation`. (#444) -- Rename the `api/propagators` package to `api/propagation`. (#444) -- Move the propagators from the `propagators` package into the `trace` API package. (#444) -- Update `Float64Gauge`, `Int64Gauge`, `Float64Counter`, `Int64Counter`, `Float64Measure`, and `Int64Measure` metric methods to use value receivers instead of pointers. (#462) -- Moved all dependencies of tools package to a tools directory. (#466) - -### Removed - -- Binary propagators. (#467) -- NOOP propagator. (#467) - -### Fixed - -- Upgraded `github.com/golangci/golangci-lint` from `v1.21.0` to `v1.23.6` in `tools/`. (#492) -- Fix a possible nil-dereference crash (#478) -- Correct comments for `InstallNewPipeline` in the stdout exporter. (#483) -- Correct comments for `InstallNewPipeline` in the dogstatsd exporter. (#484) -- Correct comments for `InstallNewPipeline` in the prometheus exporter. (#482) -- Initialize `onError` based on `Config` in prometheus exporter. (#486) -- Correct module name in prometheus exporter README. (#475) -- Removed tracer name prefix from span names. (#430) -- Fix `aggregator_test.go` import package comment. (#431) -- Improved detail in stdout exporter. (#436) -- Fix a dependency issue (generate target should depend on stringer, not lint target) in Makefile. (#442) -- Reorders the Makefile targets within `precommit` target so we generate files and build the code before doing linting, so we can get much nicer errors about syntax errors from the compiler. (#442) -- Reword function documentation in gRPC plugin. (#446) -- Send the `span.kind` tag to Jaeger from the jaeger exporter. 
(#441) -- Fix `metadataSupplier` in the jaeger exporter to overwrite the header if existing instead of appending to it. (#441) -- Upgraded to Go 1.13 in CI. (#465) -- Correct opentelemetry.io URL in trace SDK documentation. (#464) -- Refactored reference counting logic in SDK determination of stale records. (#468) -- Add call to `runtime.Gosched` in instrument `acquireHandle` logic to not block the collector. (#469) - -## [0.2.1.1] - 2020-01-13 - -### Fixed - -- Use stateful batcher on Prometheus exporter fixing a regression introduced in #395. (#428) - -## [0.2.1] - 2020-01-08 - -### Added - -- Global meter forwarding implementation. - This enables deferred initialization for metric instruments registered before the first Meter SDK is installed. (#392) -- Global trace forwarding implementation. - This enables deferred initialization for tracers registered before the first Trace SDK is installed. (#406) -- Standardize export pipeline creation in all exporters. (#395) -- Testing, organization, and comments for 64-bit field alignment. (#418) -- Script to tag all modules in the project. (#414) - -### Changed - -- Renamed `propagation` package to `propagators`. (#362) -- Renamed `B3Propagator` propagator to `B3`. (#362) -- Renamed `TextFormatPropagator` propagator to `TextFormat`. (#362) -- Renamed `BinaryPropagator` propagator to `Binary`. (#362) -- Renamed `BinaryFormatPropagator` propagator to `BinaryFormat`. (#362) -- Renamed `NoopTextFormatPropagator` propagator to `NoopTextFormat`. (#362) -- Renamed `TraceContextPropagator` propagator to `TraceContext`. (#362) -- Renamed `SpanOption` to `StartOption` in the trace API. (#369) -- Renamed `StartOptions` to `StartConfig` in the trace API. (#369) -- Renamed `EndOptions` to `EndConfig` in the trace API. (#369) -- `Number` now has a pointer receiver for its methods. (#375) -- Renamed `CurrentSpan` to `SpanFromContext` in the trace API. (#379) -- Renamed `SetCurrentSpan` to `ContextWithSpan` in the trace API. (#379) -- Renamed `Message` in Event to `Name` in the trace API. (#389) -- Prometheus exporter no longer aggregates metrics, instead it only exports them. (#385) -- Renamed `HandleImpl` to `BoundInstrumentImpl` in the metric API. (#400) -- Renamed `Float64CounterHandle` to `Float64CounterBoundInstrument` in the metric API. (#400) -- Renamed `Int64CounterHandle` to `Int64CounterBoundInstrument` in the metric API. (#400) -- Renamed `Float64GaugeHandle` to `Float64GaugeBoundInstrument` in the metric API. (#400) -- Renamed `Int64GaugeHandle` to `Int64GaugeBoundInstrument` in the metric API. (#400) -- Renamed `Float64MeasureHandle` to `Float64MeasureBoundInstrument` in the metric API. (#400) -- Renamed `Int64MeasureHandle` to `Int64MeasureBoundInstrument` in the metric API. (#400) -- Renamed `Release` method for bound instruments in the metric API to `Unbind`. (#400) -- Renamed `AcquireHandle` method for bound instruments in the metric API to `Bind`. (#400) -- Renamed the `File` option in the stdout exporter to `Writer`. (#404) -- Renamed all `Options` to `Config` for all metric exports where this wasn't already the case. - -### Fixed - -- Aggregator import path corrected. (#421) -- Correct links in README. (#368) -- The README was updated to match latest code changes in its examples. (#374) -- Don't capitalize error statements. (#375) -- Fix ignored errors. (#375) -- Fix ambiguous variable naming. (#375) -- Removed unnecessary type casting. (#375) -- Use named parameters. (#375) -- Updated release schedule.
(#378) -- Correct http-stackdriver example module name. (#394) -- Removed the `http.request` span in `httptrace` package. (#397) -- Add comments in the metrics SDK. (#399) -- Initialize checkpoint when creating ddsketch aggregator to prevent panic when merging into an empty one. (#402) (#403) -- Add documentation of compatible exporters in the README. (#405) -- Typo fix. (#408) -- Simplify span check logic in SDK tracer implementation. (#419) - -## [0.2.0] - 2019-12-03 - -### Added - -- Unary gRPC tracing example. (#351) -- Prometheus exporter. (#334) -- Dogstatsd metrics exporter. (#326) - -### Changed - -- Rename `MaxSumCount` aggregation to `MinMaxSumCount` and add the `Min` interface for this aggregation. (#352) -- Rename `GetMeter` to `Meter`. (#357) -- Rename `HTTPTraceContextPropagator` to `TraceContextPropagator`. (#355) -- Rename `HTTPB3Propagator` to `B3Propagator`. (#355) -- Rename `HTTPTraceContextPropagator` to `TraceContextPropagator`. (#355) -- Move `/global` package to `/api/global`. (#356) -- Rename `GetTracer` to `Tracer`. (#347) - -### Removed - -- `SetAttribute` from the `Span` interface in the trace API. (#361) -- `AddLink` from the `Span` interface in the trace API. (#349) -- `Link` from the `Span` interface in the trace API. (#349) - -### Fixed - -- Exclude example directories from coverage report. (#365) -- Lint make target now implements automatic fixes with `golangci-lint` before a second run to report the remaining issues. (#360) -- Drop `GO111MODULE` environment variable in Makefile as Go 1.13 is the project specified minimum version and this environment variable is not needed for that version of Go. (#359) -- Run the race checker for all tests. (#354) -- Redundant commands in the Makefile are removed. (#354) -- Split the `generate` and `lint` targets of the Makefile. (#354) -- Renames `circle-ci` target to the more generic `ci` in Makefile. (#354) -- Add example Prometheus binary to gitignore. (#358) -- Support negative numbers with the `MaxSumCount`. (#335) -- Resolve race conditions in `push_test.go` identified in #339. (#340) -- Use `/usr/bin/env bash` as a shebang in scripts rather than `/bin/bash`. (#336) -- Trace benchmark now tests both `AlwaysSample` and `NeverSample`. - Previously it was testing `AlwaysSample` twice. (#325) -- Trace benchmark now uses a `[]byte` for `TraceID` to fix failing test. (#325) -- Added a trace benchmark to test variadic functions in `setAttribute` vs `setAttributes`. (#325) -- The `defaultkeys` batcher was only using the encoded label set as its map key while building a checkpoint. - This allowed distinct label sets through, but any metrics sharing a label set could be overwritten or merged incorrectly. - This was corrected. (#333) - -## [0.1.2] - 2019-11-18 - -### Fixed - -- Optimized the `simplelru` map for attributes to reduce the number of allocations. (#328) -- Removed unnecessary unslicing of parameters that are already a slice. (#324) - -## [0.1.1] - 2019-11-18 - -This release contains a Metrics SDK with stdout exporter and supports basic aggregations such as counter, gauges, array, maxsumcount, and ddsketch. - -### Added - -- Metrics stdout export pipeline. (#265) -- Array aggregation for raw measure metrics. (#282) -- The `core.Value` now has a `MarshalJSON` method. (#281) - -### Removed - -- `WithService`, `WithResources`, and `WithComponent` methods of tracers. (#314) -- Prefix slash in `Tracer.Start()` for the Jaeger example. (#292) - -### Changed - -- Allocation in LabelSet construction to reduce GC overhead.
(#318) -- `trace.WithAttributes` to append values instead of replacing (#315) -- Use a formula for tolerance in sampling tests. (#298) -- Move export types into trace and metric-specific sub-directories. (#289) -- `SpanKind` back to being based on an `int` type. (#288) - -### Fixed - -- URL to OpenTelemetry website in README. (#323) -- Name of othttp default tracer. (#321) -- `ExportSpans` for the stackdriver exporter now handles `nil` context. (#294) -- CI modules cache to correctly restore/save from/to the cache. (#316) -- Fix metric SDK race condition between `LoadOrStore` and the assignment `rec.recorder = i.meter.exporter.AggregatorFor(rec)`. (#293) -- README now reflects the new code structure introduced with these changes. (#291) -- Make the basic example work. (#279) - -## [0.1.0] - 2019-11-04 - -This is the first release of open-telemetry go library. -It contains api and sdk for trace and meter. - -### Added - -- Initial OpenTelemetry trace and metric API prototypes. -- Initial OpenTelemetry trace, metric, and export SDK packages. -- A wireframe bridge to support compatibility with OpenTracing. -- Example code for a basic, http-stackdriver, http, jaeger, and named tracer setup. -- Exporters for Jaeger, Stackdriver, and stdout. -- Propagators for binary, B3, and trace-context protocols. -- Project information and guidelines in the form of a README and CONTRIBUTING. -- Tools to build the project and a Makefile to automate the process. -- Apache-2.0 license. -- CircleCI build CI manifest files. -- CODEOWNERS file to track owners of this project. - -[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.14.0...HEAD -[1.14.0/0.37.0/0.0.4]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.14.0 -[1.13.0/0.36.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.13.0 -[1.12.0/0.35.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.12.0 -[1.11.2/0.34.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.2 -[1.11.1/0.33.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.1 -[1.11.0/0.32.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.0 -[0.32.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.2 -[0.32.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.1 -[0.32.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.0 -[1.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.10.0 -[1.9.0/0.0.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.9.0 -[1.8.0/0.31.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.8.0 -[1.7.0/0.30.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.7.0 -[0.29.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.29.0 -[1.6.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.3 -[1.6.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.2 -[1.6.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.1 -[1.6.0/0.28.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.0 -[1.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.5.0 -[1.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.1 -[1.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.0 -[1.3.0]: 
https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.3.0 -[1.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.2.0 -[1.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.1.0 -[1.0.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.1 -[Metrics 0.24.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.24.0 -[1.0.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0 -[1.0.0-RC3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC3 -[1.0.0-RC2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC2 -[Experimental Metrics v0.22.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.22.0 -[1.0.0-RC1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC1 -[0.20.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.20.0 -[0.19.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.19.0 -[0.18.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.18.0 -[0.17.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.17.0 -[0.16.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.16.0 -[0.15.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.15.0 -[0.14.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.14.0 -[0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.13.0 -[0.12.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.12.0 -[0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.11.0 -[0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.10.0 -[0.9.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.9.0 -[0.8.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.8.0 -[0.7.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.7.0 -[0.6.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.6.0 -[0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.5.0 -[0.4.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.3 -[0.4.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.2 -[0.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.1 -[0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.0 -[0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.3.0 -[0.2.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.3 -[0.2.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.2 -[0.2.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1.1 -[0.2.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1 -[0.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.0 -[0.1.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.2 -[0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1 -[0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0 - -[Go 1.20]: https://go.dev/doc/go1.20 -[Go 1.19]: https://go.dev/doc/go1.19 -[Go 1.18]: https://go.dev/doc/go1.18 diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS deleted file mode 100644 index c4012ed6c..000000000 --- 
a/vendor/go.opentelemetry.io/otel/CODEOWNERS +++ /dev/null @@ -1,17 +0,0 @@ -##################################################### -# -# List of approvers for this repository -# -##################################################### -# -# Learn about membership in OpenTelemetry community: -# https://github.com/open-telemetry/community/blob/main/community-membership.md -# -# -# Learn about CODEOWNERS file format: -# https://help.github.com/en/articles/about-code-owners -# - -* @jmacd @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu - -CODEOWNERS @MrAlias @Aneurysm9 @MadVikingGod diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md deleted file mode 100644 index a6928bfdf..000000000 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ /dev/null @@ -1,526 +0,0 @@ -# Contributing to opentelemetry-go - -The Go special interest group (SIG) meets regularly. See the -OpenTelemetry -[community](https://github.com/open-telemetry/community#golang-sdk) -repo for information on this and other language SIGs. - -See the [public meeting -notes](https://docs.google.com/document/d/1A63zSWX0x2CyCK_LoNhmQC4rqhLpYXJzXbEPDUQ2n6w/edit#heading=h.9tngw7jdwd6b) -for a summary description of past meetings. To request edit access, -join the meeting or get in touch on -[Slack](https://cloud-native.slack.com/archives/C01NPAXACKT). - -## Development - -You can view and edit the source code by cloning this repository: - -```sh -git clone https://github.com/open-telemetry/opentelemetry-go.git -``` - -Run `make test` to run the tests instead of `go test`. - -There are some generated files checked into the repo. To make sure -that the generated files are up-to-date, run `make` (or `make -precommit` - the `precommit` target is the default). - -The `precommit` target also fixes the formatting of the code and -checks the status of the go module files. - -If after running `make precommit` the output of `git status` contains -`nothing to commit, working tree clean` then it means that everything -is up-to-date and properly formatted. - -## Pull Requests - -### How to Send Pull Requests - -Everyone is welcome to contribute code to `opentelemetry-go` via -GitHub pull requests (PRs). - -To create a new PR, fork the project in GitHub and clone the upstream -repo: - -```sh -go get -d go.opentelemetry.io/otel -``` - -(This may print some warning about "build constraints exclude all Go -files", just ignore it.) - -This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. You -can alternatively use `git` directly with: - -```sh -git clone https://github.com/open-telemetry/opentelemetry-go -``` - -(Note that `git clone` is *not* using the `go.opentelemetry.io/otel` name - -that name is a kind of a redirector to GitHub that `go get` can -understand, but `git` does not.) - -This would put the project in the `opentelemetry-go` directory in the -current working directory. - -Enter the newly created directory and add your fork as a new remote: - -```sh -git remote add <YOUR_FORK> git@github.com:<YOUR_GITHUB_USERNAME>/opentelemetry-go -``` - -Check out a new branch, make modifications, run linters and tests, update -`CHANGELOG.md`, and push the branch to your fork: - -```sh -git checkout -b <YOUR_BRANCH_NAME> -# edit files -# update changelog -make precommit -git add -p -git commit -git push <YOUR_FORK> <YOUR_BRANCH_NAME> -``` - -Open a pull request against the main `opentelemetry-go` repo. Be sure to add the pull -request ID to the entry you added to `CHANGELOG.md`.
- -### How to Receive Comments - -* If the PR is not ready for review, please put `[WIP]` in the title, - tag it as `work-in-progress`, or mark it as - [`draft`](https://github.blog/2019-02-14-introducing-draft-pull-requests/). -* Make sure the CLA is signed and CI is clear. - -### How to Get PRs Merged - -A PR is considered to be **ready to merge** when: - -* It has received two approvals from Collaborators/Maintainers (at - different companies). This is not enforced through technical means - and a PR may be **ready to merge** with a single approval if the change - and its approach have been discussed and consensus reached. -* Feedback has been addressed. -* Any substantive changes to your PR will require that you clear any prior - Approval reviews; this includes changes resulting from other feedback. Unless - the approver explicitly stated that their approval will persist across - changes, it should be assumed that the PR needs their review again. Other - project members (e.g. approvers, maintainers) can help with this if there are - any questions or if you forget to clear reviews. -* It has been open for review for at least one working day. This gives - people reasonable time to review. -* Trivial changes (typo, cosmetic, doc, etc.) do not have to wait for - one day and may be merged with a single Maintainer's approval. -* `CHANGELOG.md` has been updated to reflect what has been - added, changed, removed, or fixed. -* `README.md` has been updated if necessary. -* An urgent fix can be an exception as long as it has been actively - communicated. - -Any Maintainer can merge the PR once it is **ready to merge**. - -## Design Choices - -As with other OpenTelemetry clients, opentelemetry-go follows the -[opentelemetry-specification](https://github.com/open-telemetry/opentelemetry-specification). - -It's especially valuable to read through the [library -guidelines](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/library-guidelines.md). - -### Focus on Capabilities, Not Structure Compliance - -OpenTelemetry is an evolving specification, one where the desires and -use cases are clear, but the method to satisfy those use cases is -not. - -As such, contributions should provide functionality and behavior that -conforms to the specification, but the interface and structure are -flexible. - -It is preferable to have contributions follow the idioms of the -language rather than conform to specific API names or argument -patterns in the spec. - -For a deeper discussion, see -[this](https://github.com/open-telemetry/opentelemetry-specification/issues/165). - -## Documentation - -Each non-example Go Module should have its own `README.md` containing: - -- A pkg.go.dev badge which can be generated [here](https://pkg.go.dev/badge/). -- Brief description. -- Installation instructions (and requirements if applicable). -- Hyperlink to an example. Depending on the component the example can be: - - An `example_test.go` like [here](exporters/stdout/stdouttrace/example_test.go). - - A sample Go application with its own `README.md`, like [here](example/zipkin). -- Additional documentation sections such as: - - Configuration, - - Contributing, - - References. - -[Here](exporters/jaeger/README.md) is an example of a concise `README.md`. - -Moreover, it should be possible to navigate to any `README.md` from the -root `README.md`. - -## Style Guide - -One of the primary goals of this project is that it is actually used by -developers.
With this goal in mind, the project strives to build -user-friendly and idiomatic Go code adhering to the Go community's best -practices. - -For a non-comprehensive but foundational overview of these best practices, -the [Effective Go](https://golang.org/doc/effective_go.html) documentation -is an excellent starting place. - -As a convenience for developers building this project, the `make precommit` -target will format, lint, validate, and in some cases fix the changes you plan to -submit. This check will need to pass for your changes to be able to be -merged. - -In addition to idiomatic Go, the project has adopted certain standards for -implementations of common patterns. These standards should be followed as a -default, and if they are not followed documentation needs to be included as -to the reasons why. - -### Configuration - -When creating an instantiation function for a complex `type T struct`, it is -useful to allow a variable number of options to be applied. However, the strong -type system of Go restricts the function design options. There are a few ways -to solve this problem, but we have landed on the following design. - -#### `config` - -Configuration should be held in a `struct` named `config`, or prefixed with the -specific type name this configuration applies to if there are multiple -`config`s in the package. This type must contain configuration options. - -```go -// config contains configuration options for a thing. -type config struct { - // options ... -} -``` - -In general the `config` type will not need to be used externally to the -package and should be unexported. If, however, it is expected that the user -will likely want to build custom options for the configuration, the `config` -should be exported. Please include in the documentation for the `config` -how the user can extend the configuration. - -It is important that internal `config`s are not shared across package boundaries, -meaning a `config` from one package should not be directly used by another. The -one exception is the API packages. The configs from the base API, e.g. -`go.opentelemetry.io/otel/trace.TracerConfig` and -`go.opentelemetry.io/otel/metric.InstrumentConfig`, are intended to be consumed -by the SDK; therefore, it is expected that these are exported. - -When a config is exported, we want to maintain forward and backward -compatibility; to achieve this, no fields should be exported but should -instead be accessed by methods. - -Optionally, it is common to include a `newConfig` function (with the same -naming scheme). This function wraps setting any defaults and looping over -all options to create a configured `config`. - -```go -// newConfig returns an appropriately configured config. -func newConfig(options ...Option) config { - // Set default values for config. - config := config{/* […] */} - for _, option := range options { - config = option.apply(config) - } - // Perform any validation here. - return config -} -``` - -If validation of the `config` options is also performed, this can return an -error as well that is expected to be handled by the instantiation function -or propagated to the user. - -Given the design goal of not having the user need to work with the `config`, -the `newConfig` function should also be unexported. - -#### `Option` - -To set the value of the options a `config` contains, a corresponding -`Option` interface type should be used. - -```go -type Option interface { - apply(config) config -} -``` - -Having `apply` unexported makes sure that it will not be used externally.
-Moreover, the interface becomes sealed so the user cannot easily implement -the interface on its own. - -The `apply` method should return a modified version of the passed config. -This approach, instead of passing a pointer, is used to prevent the config from being allocated to the heap. - -The name of the interface should be prefixed in the same way the -corresponding `config` is (if at all). - -#### Options - -All user configurable options for a `config` must have a related unexported -implementation of the `Option` interface and an exported configuration -function that wraps this implementation. - -The wrapping function name should be prefixed with `With*` (or in the -special case of a boolean options `Without*`) and should have the following -function signature. - -```go -func With*(…) Option { … } -``` - -##### `bool` Options - -```go -type defaultFalseOption bool - -func (o defaultFalseOption) apply(c config) config { - c.Bool = bool(o) - return c -} - -// WithOption sets a T to have an option included. -func WithOption() Option { - return defaultFalseOption(true) -} -``` - -```go -type defaultTrueOption bool - -func (o defaultTrueOption) apply(c config) config { - c.Bool = bool(o) - return c -} - -// WithoutOption sets a T to have Bool option excluded. -func WithoutOption() Option { - return defaultTrueOption(false) -} -``` - -##### Declared Type Options - -```go -type myTypeOption struct { - MyType MyType -} - -func (o myTypeOption) apply(c config) config { - c.MyType = o.MyType - return c -} - -// WithMyType sets T to have include MyType. -func WithMyType(t MyType) Option { - return myTypeOption{t} -} -``` - -##### Functional Options - -```go -type optionFunc func(config) config - -func (fn optionFunc) apply(c config) config { - return fn(c) -} - -// WithMyType sets t as MyType. -func WithMyType(t MyType) Option { - return optionFunc(func(c config) config { - c.MyType = t - return c - }) -} -``` - -#### Instantiation - -Using this configuration pattern to configure instantiation with a `NewT` -function. - -```go -func NewT(options ...Option) T {…} -``` - -Any required parameters can be declared before the variadic `options`. - -#### Dealing with Overlap - -Sometimes there are multiple complex `struct` that share common -configuration and also have distinct configuration. To avoid repeated -portions of `config`s, a common `config` can be used with the union of -options being handled with the `Option` interface. - -For example. - -```go -// config holds options for all animals. -type config struct { - Weight float64 - Color string - MaxAltitude float64 -} - -// DogOption apply Dog specific options. -type DogOption interface { - applyDog(config) config -} - -// BirdOption apply Bird specific options. -type BirdOption interface { - applyBird(config) config -} - -// Option apply options for all animals. 
-type Option interface { - BirdOption - DogOption -} - -type weightOption float64 - -func (o weightOption) applyDog(c config) config { - c.Weight = float64(o) - return c -} - -func (o weightOption) applyBird(c config) config { - c.Weight = float64(o) - return c -} - -func WithWeight(w float64) Option { return weightOption(w) } - -type furColorOption string - -func (o furColorOption) applyDog(c config) config { - c.Color = string(o) - return c -} - -func WithFurColor(c string) DogOption { return furColorOption(c) } - -type maxAltitudeOption float64 - -func (o maxAltitudeOption) applyBird(c config) config { - c.MaxAltitude = float64(o) - return c -} - -func WithMaxAltitude(a float64) BirdOption { return maxAltitudeOption(a) } - -func NewDog(name string, o ...DogOption) Dog {…} -func NewBird(name string, o ...BirdOption) Bird {…} -``` - -### Interfaces - -To allow other developers to better comprehend the code, it is important -to ensure it is sufficiently documented. One simple measure that contributes -to this aim is self-documenting by naming method parameters. Therefore, -where appropriate, methods of every exported interface type should have -their parameters appropriately named. - -#### Interface Stability - -All exported stable interfaces that include the following warning in their -documentation are allowed to be extended with additional methods. - -> Warning: methods may be added to this interface in minor releases. - -Otherwise, stable interfaces MUST NOT be modified. - -If new functionality is needed for an interface that cannot be changed, it MUST -be added by including an additional interface. That added interface can be a -simple interface for the specific functionality that you want to add or it can -be a super-set of the original interface. For example, if you wanted to add a -`Close` method to the `Exporter` interface: - -```go -type Exporter interface { - Export() -} -``` - -A new interface, `Closer`, can be added: - -```go -type Closer interface { - Close() -} -``` - -Code that is passed the `Exporter` interface can now check to see if the passed -value also satisfies the new interface. E.g. - -```go -func caller(e Exporter) { - /* ... */ - if c, ok := e.(Closer); ok { - c.Close() - } - /* ... */ -} -``` - -Alternatively, a new type that is the super-set of an `Exporter` can be created. - -```go -type ClosingExporter struct { - Exporter - Close() -} -``` - -This new type can be used similarly to the simple interface above in that a -passed `Exporter` type can be asserted to satisfy the `ClosingExporter` type -and the `Close` method called. - -This super-set approach can be useful if there is explicit behavior that needs -to be coupled with the original type and passed as a unified type to a new -function, but, because of this coupling, it also limits the applicability of -the added functionality. If there exist other interfaces where this -functionality should be added, each one will need its own super-set -interface and will duplicate the pattern. For this reason, the simple targeted -interface that defines the specific functionality should be preferred.
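To tie the configuration guidance above together, here is a minimal, self-contained sketch of the pattern as described in this Style Guide. The `thing` package, `Thing` type, and `WithTimeout` option are illustrative names only, not part of this repository.

```go
package thing

import "time"

// config holds the options for a Thing; it stays unexported so callers
// only interact with it through Option values.
type config struct {
	timeout time.Duration
}

// newConfig applies defaults first, then every supplied Option in order.
func newConfig(options ...Option) config {
	cfg := config{timeout: 10 * time.Second} // default value
	for _, opt := range options {
		cfg = opt.apply(cfg)
	}
	return cfg
}

// Option configures a Thing. The unexported apply method keeps the
// interface sealed to this package.
type Option interface {
	apply(config) config
}

type timeoutOption time.Duration

func (o timeoutOption) apply(c config) config {
	c.timeout = time.Duration(o)
	return c
}

// WithTimeout returns an Option that overrides the default timeout.
func WithTimeout(d time.Duration) Option { return timeoutOption(d) }

// Thing is the type being constructed.
type Thing struct {
	timeout time.Duration
}

// NewThing builds a Thing from the configured options, for example:
// NewThing(WithTimeout(2 * time.Second)).
func NewThing(options ...Option) Thing {
	cfg := newConfig(options...)
	return Thing{timeout: cfg.timeout}
}
```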
- -## Approvers and Maintainers - -Approvers: - -- [Evan Torrie](https://github.com/evantorrie), Verizon Media -- [Josh MacDonald](https://github.com/jmacd), LightStep -- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics -- [David Ashpole](https://github.com/dashpole), Google -- [Robert Pająk](https://github.com/pellared), Splunk -- [Chester Cheung](https://github.com/hanyuancheung), Tencent -- [Damien Mathieu](https://github.com/dmathieu), Elastic - -Maintainers: - -- [Aaron Clawson](https://github.com/MadVikingGod), LightStep -- [Anthony Mirabella](https://github.com/Aneurysm9), AWS -- [Tyler Yahn](https://github.com/MrAlias), Splunk - -Emeritus: - -- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep - -### Become an Approver or a Maintainer - -See the [community membership document in OpenTelemetry community -repo](https://github.com/open-telemetry/community/blob/main/community-membership.md). diff --git a/vendor/go.opentelemetry.io/otel/LICENSE b/vendor/go.opentelemetry.io/otel/LICENSE deleted file mode 100644 index 261eeb9e9..000000000 --- a/vendor/go.opentelemetry.io/otel/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile deleted file mode 100644 index 0e6ffa284..000000000 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ /dev/null @@ -1,227 +0,0 @@ -# Copyright The OpenTelemetry Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -TOOLS_MOD_DIR := ./internal/tools - -ALL_DOCS := $(shell find . -name '*.md' -type f | sort) -ALL_GO_MOD_DIRS := $(shell find . 
-type f -name 'go.mod' -exec dirname {} \; | sort) -OTEL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(ALL_GO_MOD_DIRS)) -ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | grep -E -v '^./example|^$(TOOLS_MOD_DIR)' | sort) - -GO = go -TIMEOUT = 60 - -.DEFAULT_GOAL := precommit - -.PHONY: precommit ci -precommit: dependabot-generate license-check vanity-import-fix misspell go-mod-tidy golangci-lint-fix test-default -ci: dependabot-check license-check lint vanity-import-check build test-default check-clean-work-tree test-coverage - -# Tools - -TOOLS = $(CURDIR)/.tools - -$(TOOLS): - @mkdir -p $@ -$(TOOLS)/%: | $(TOOLS) - cd $(TOOLS_MOD_DIR) && \ - $(GO) build -o $@ $(PACKAGE) - -MULTIMOD = $(TOOLS)/multimod -$(TOOLS)/multimod: PACKAGE=go.opentelemetry.io/build-tools/multimod - -SEMCONVGEN = $(TOOLS)/semconvgen -$(TOOLS)/semconvgen: PACKAGE=go.opentelemetry.io/build-tools/semconvgen - -CROSSLINK = $(TOOLS)/crosslink -$(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink - -SEMCONVKIT = $(TOOLS)/semconvkit -$(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit - -DBOTCONF = $(TOOLS)/dbotconf -$(TOOLS)/dbotconf: PACKAGE=go.opentelemetry.io/build-tools/dbotconf - -GOLANGCI_LINT = $(TOOLS)/golangci-lint -$(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint - -MISSPELL = $(TOOLS)/misspell -$(TOOLS)/misspell: PACKAGE=github.com/client9/misspell/cmd/misspell - -GOCOVMERGE = $(TOOLS)/gocovmerge -$(TOOLS)/gocovmerge: PACKAGE=github.com/wadey/gocovmerge - -STRINGER = $(TOOLS)/stringer -$(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer - -PORTO = $(TOOLS)/porto -$(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto - -GOJQ = $(TOOLS)/gojq -$(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq - -.PHONY: tools -tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) - -# Build - -.PHONY: generate build - -generate: $(OTEL_GO_MOD_DIRS:%=generate/%) -generate/%: DIR=$* -generate/%: | $(STRINGER) $(PORTO) - @echo "$(GO) generate $(DIR)/..." \ - && cd $(DIR) \ - && PATH="$(TOOLS):$${PATH}" $(GO) generate ./... && $(PORTO) -w . - -build: generate $(OTEL_GO_MOD_DIRS:%=build/%) $(OTEL_GO_MOD_DIRS:%=build-tests/%) -build/%: DIR=$* -build/%: - @echo "$(GO) build $(DIR)/..." \ - && cd $(DIR) \ - && $(GO) build ./... - -build-tests/%: DIR=$* -build-tests/%: - @echo "$(GO) build tests $(DIR)/..." \ - && cd $(DIR) \ - && $(GO) list ./... \ - | grep -v third_party \ - | xargs $(GO) test -vet=off -run xxxxxMatchNothingxxxxx >/dev/null - -# Tests - -TEST_TARGETS := test-default test-bench test-short test-verbose test-race -.PHONY: $(TEST_TARGETS) test -test-default test-race: ARGS=-race -test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=. -test-short: ARGS=-short -test-verbose: ARGS=-v -race -$(TEST_TARGETS): test -test: $(OTEL_GO_MOD_DIRS:%=test/%) -test/%: DIR=$* -test/%: - @echo "$(GO) test -timeout $(TIMEOUT)s $(ARGS) $(DIR)/..." \ - && cd $(DIR) \ - && $(GO) list ./... \ - | grep -v third_party \ - | xargs $(GO) test -timeout $(TIMEOUT)s $(ARGS) - -COVERAGE_MODE = atomic -COVERAGE_PROFILE = coverage.out -.PHONY: test-coverage -test-coverage: | $(GOCOVMERGE) - @set -e; \ - printf "" > coverage.txt; \ - for dir in $(ALL_COVERAGE_MOD_DIRS); do \ - echo "$(GO) test -coverpkg=go.opentelemetry.io/otel/... 
-covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" $${dir}/..."; \ - (cd "$${dir}" && \ - $(GO) list ./... \ - | grep -v third_party \ - | grep -v 'semconv/v.*' \ - | xargs $(GO) test -coverpkg=./... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" && \ - $(GO) tool cover -html=coverage.out -o coverage.html); \ - done; \ - $(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt - -.PHONY: golangci-lint golangci-lint-fix -golangci-lint-fix: ARGS=--fix -golangci-lint-fix: golangci-lint -golangci-lint: $(OTEL_GO_MOD_DIRS:%=golangci-lint/%) -golangci-lint/%: DIR=$* -golangci-lint/%: | $(GOLANGCI_LINT) - @echo 'golangci-lint $(if $(ARGS),$(ARGS) ,)$(DIR)' \ - && cd $(DIR) \ - && $(GOLANGCI_LINT) run --allow-serial-runners $(ARGS) - -.PHONY: crosslink -crosslink: | $(CROSSLINK) - @echo "Updating intra-repository dependencies in all go modules" \ - && $(CROSSLINK) --root=$(shell pwd) --prune - -.PHONY: go-mod-tidy -go-mod-tidy: $(ALL_GO_MOD_DIRS:%=go-mod-tidy/%) -go-mod-tidy/%: DIR=$* -go-mod-tidy/%: | crosslink - @echo "$(GO) mod tidy in $(DIR)" \ - && cd $(DIR) \ - && $(GO) mod tidy -compat=1.18 - -.PHONY: lint-modules -lint-modules: go-mod-tidy - -.PHONY: lint -lint: misspell lint-modules golangci-lint - -.PHONY: vanity-import-check -vanity-import-check: | $(PORTO) - @$(PORTO) --include-internal -l . || echo "(run: make vanity-import-fix)" - -.PHONY: vanity-import-fix -vanity-import-fix: | $(PORTO) - @$(PORTO) --include-internal -w . - -.PHONY: misspell -misspell: | $(MISSPELL) - @$(MISSPELL) -w $(ALL_DOCS) - -.PHONY: license-check -license-check: - @licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! -path './.git/*' ) ; do \ - awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=3 { found=1; next } END { if (!found) print FILENAME }' $$f; \ - done); \ - if [ -n "$${licRes}" ]; then \ - echo "license header checking failed:"; echo "$${licRes}"; \ - exit 1; \ - fi - -DEPENDABOT_CONFIG = .github/dependabot.yml -.PHONY: dependabot-check -dependabot-check: | $(DBOTCONF) - @$(DBOTCONF) verify $(DEPENDABOT_CONFIG) || echo "(run: make dependabot-generate)" - -.PHONY: dependabot-generate -dependabot-generate: | $(DBOTCONF) - @$(DBOTCONF) generate > $(DEPENDABOT_CONFIG) - -.PHONY: check-clean-work-tree -check-clean-work-tree: - @if ! git diff --quiet; then \ - echo; \ - echo 'Working tree is not clean, did you forget to run "make precommit"?'; \ - echo; \ - git status; \ - exit 1; \ - fi - -SEMCONVPKG ?= "semconv/" -.PHONY: semconv-generate -semconv-generate: | $(SEMCONVGEN) $(SEMCONVKIT) - [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry specification tag"; exit 1 ) - [ "$(OTEL_SPEC_REPO)" ] || ( echo "OTEL_SPEC_REPO unset: missing path to opentelemetry specification repo"; exit 1 ) - $(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=span -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" - $(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=event -p conventionType=event -f event.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" - $(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." 
--only=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" - $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" - -.PHONY: prerelease -prerelease: | $(MULTIMOD) - @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) - $(MULTIMOD) verify && $(MULTIMOD) prerelease -m ${MODSET} - -COMMIT ?= "HEAD" -.PHONY: add-tags -add-tags: | $(MULTIMOD) - @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) - $(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md deleted file mode 100644 index 878d87e58..000000000 --- a/vendor/go.opentelemetry.io/otel/README.md +++ /dev/null @@ -1,114 +0,0 @@ -# OpenTelemetry-Go - -[![CI](https://github.com/open-telemetry/opentelemetry-go/workflows/ci/badge.svg)](https://github.com/open-telemetry/opentelemetry-go/actions?query=workflow%3Aci+branch%3Amain) -[![codecov.io](https://codecov.io/gh/open-telemetry/opentelemetry-go/coverage.svg?branch=main)](https://app.codecov.io/gh/open-telemetry/opentelemetry-go?branch=main) -[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel)](https://pkg.go.dev/go.opentelemetry.io/otel) -[![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel) -[![Slack](https://img.shields.io/badge/slack-@cncf/otel--go-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C01NPAXACKT) - -OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/). -It provides a set of APIs to directly measure performance and behavior of your software and send this data to observability platforms. - -## Project Status - -| Signal | Status | Project | -| ------- | ---------- | ------- | -| Traces | Stable | N/A | -| Metrics | Alpha | N/A | -| Logs | Frozen [1] | N/A | - -- [1]: The Logs signal development is halted for this project while we develop both Traces and Metrics. - No Logs Pull Requests are currently being accepted. - -Progress and status specific to this repository is tracked in our local -[project boards](https://github.com/open-telemetry/opentelemetry-go/projects) -and -[milestones](https://github.com/open-telemetry/opentelemetry-go/milestones). - -Project versioning information and stability guarantees can be found in the -[versioning documentation](./VERSIONING.md). - -### Compatibility - -OpenTelemetry-Go ensures compatibility with the current supported versions of -the [Go language](https://golang.org/doc/devel/release#policy): - -> Each major Go release is supported until there are two newer major releases. -> For example, Go 1.5 was supported until the Go 1.7 release, and Go 1.6 was supported until the Go 1.8 release. - -For versions of Go that are no longer supported upstream, opentelemetry-go will -stop ensuring compatibility with these versions in the following manner: - -- A minor release of opentelemetry-go will be made to add support for the new - supported release of Go. -- The following minor release of opentelemetry-go will remove compatibility - testing for the oldest (now archived upstream) version of Go. This, and - future, releases of opentelemetry-go may include features only supported by - the currently supported versions of Go. - -Currently, this project supports the following environments. 
- -| OS | Go Version | Architecture | -| ------- | ---------- | ------------ | -| Ubuntu | 1.20 | amd64 | -| Ubuntu | 1.19 | amd64 | -| Ubuntu | 1.18 | amd64 | -| Ubuntu | 1.20 | 386 | -| Ubuntu | 1.19 | 386 | -| Ubuntu | 1.18 | 386 | -| MacOS | 1.20 | amd64 | -| MacOS | 1.19 | amd64 | -| MacOS | 1.18 | amd64 | -| Windows | 1.20 | amd64 | -| Windows | 1.19 | amd64 | -| Windows | 1.18 | amd64 | -| Windows | 1.20 | 386 | -| Windows | 1.19 | 386 | -| Windows | 1.18 | 386 | - -While this project should work for other systems, no compatibility guarantees -are made for those systems currently. - -## Getting Started - -You can find a getting started guide on [opentelemetry.io](https://opentelemetry.io/docs/go/getting-started/). - -OpenTelemetry's goal is to provide a single set of APIs to capture distributed -traces and metrics from your application and send them to an observability -platform. This project allows you to do just that for applications written in -Go. There are two steps to this process: instrument your application, and -configure an exporter. - -### Instrumentation - -To start capturing distributed traces and metric events from your application -it first needs to be instrumented. The easiest way to do this is by using an -instrumentation library for your code. Be sure to check out [the officially -supported instrumentation -libraries](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/instrumentation). - -If you need to extend the telemetry an instrumentation library provides or want -to build your own instrumentation for your application directly you will need -to use the -[Go otel](https://pkg.go.dev/go.opentelemetry.io/otel) -package. The included [examples](./example/) are a good way to see some -practical uses of this process. - -### Export - -Now that your application is instrumented to collect telemetry, it needs an -export pipeline to send that telemetry to an observability platform. - -All officially supported exporters for the OpenTelemetry project are contained in the [exporters directory](./exporters). - -| Exporter | Metrics | Traces | -| :-----------------------------------: | :-----: | :----: | -| [Jaeger](./exporters/jaeger/) | | ✓ | -| [OTLP](./exporters/otlp/) | ✓ | ✓ | -| [Prometheus](./exporters/prometheus/) | ✓ | | -| [stdout](./exporters/stdout/) | ✓ | ✓ | -| [Zipkin](./exporters/zipkin/) | | ✓ | - -## Contributing - -See the [contributing documentation](CONTRIBUTING.md). diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md deleted file mode 100644 index 77d56c936..000000000 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ /dev/null @@ -1,127 +0,0 @@ -# Release Process - -## Semantic Convention Generation - -New versions of the [OpenTelemetry specification] mean new versions of the `semconv` package need to be generated. -The `semconv-generate` make target is used for this. - -1. Checkout a local copy of the [OpenTelemetry specification] to the desired release tag. -2. Pull the latest `otel/semconvgen` image: `docker pull otel/semconvgen:latest` -3. Run the `make semconv-generate ...` target from this repository. - -For example, - -```sh -export TAG="v1.13.0" # Change to the release version you are generating. -export OTEL_SPEC_REPO="/absolute/path/to/opentelemetry-specification" -git -C "$OTEL_SPEC_REPO" checkout "tags/$TAG" -b "$TAG" -docker pull otel/semconvgen:latest -make semconv-generate # Uses the exported TAG and OTEL_SPEC_REPO. 
-``` - -This should create a new sub-package of [`semconv`](./semconv). -Ensure things look correct before submitting a pull request to include the addition. - -**Note**, the generation code was changed to generate versions >= 1.13. -To generate versions prior to this, checkout the old release of this repository (i.e. [2fe8861](https://github.com/open-telemetry/opentelemetry-go/commit/2fe8861a24e20088c065b116089862caf9e3cd8b)). - -## Pre-Release - -First, decide which module sets will be released and update their versions -in `versions.yaml`. Commit this change to a new branch. - -Update go.mod for submodules to depend on the new release which will happen in the next step. - -1. Run the `prerelease` make target. It creates a branch - `prerelease__` that will contain all release changes. - - ``` - make prerelease MODSET= - ``` - -2. Verify the changes. - - ``` - git diff ...prerelease__ - ``` - - This should have changed the version for all modules to be ``. - If these changes look correct, merge them into your pre-release branch: - - ```go - git merge prerelease__ - ``` - -3. Update the [Changelog](./CHANGELOG.md). - - Make sure all relevant changes for this release are included and are in language that non-contributors to the project can understand. - To verify this, you can look directly at the commits since the ``. - - ``` - git --no-pager log --pretty=oneline "..HEAD" - ``` - - - Move all the `Unreleased` changes into a new section following the title scheme (`[] - `). - - Update all the appropriate links at the bottom. - -4. Push the changes to upstream and create a Pull Request on GitHub. - Be sure to include the curated changes from the [Changelog](./CHANGELOG.md) in the description. - -## Tag - -Once the Pull Request with all the version changes has been approved and merged it is time to tag the merged commit. - -***IMPORTANT***: It is critical you use the same tag that you used in the Pre-Release step! -Failure to do so will leave things in a broken state. As long as you do not -change `versions.yaml` between pre-release and this step, things should be fine. - -***IMPORTANT***: [There is currently no way to remove an incorrectly tagged version of a Go module](https://github.com/golang/go/issues/34189). -It is critical you make sure the version you push upstream is correct. -[Failure to do so will lead to minor emergencies and tough to work around](https://github.com/open-telemetry/opentelemetry-go/issues/331). - -1. For each module set that will be released, run the `add-tags` make target - using the `` of the commit on the main branch for the merged Pull Request. - - ``` - make add-tags MODSET= COMMIT= - ``` - - It should only be necessary to provide an explicit `COMMIT` value if the - current `HEAD` of your working directory is not the correct commit. - -2. Push tags to the upstream remote (not your fork: `github.com/open-telemetry/opentelemetry-go.git`). - Make sure you push all sub-modules as well. - - ``` - git push upstream - git push upstream - ... - ``` - -## Release - -Finally create a Release for the new `` on GitHub. -The release body should include all the release notes from the Changelog for this release. - -## Verify Examples - -After releasing verify that examples build outside of the repository. - -``` -./verify_examples.sh -``` - -The script copies examples into a different directory removes any `replace` declarations in `go.mod` and builds them. -This ensures they build with the published release, not the local copy. 
- -## Post-Release - -### Contrib Repository - -Once verified be sure to [make a release for the `contrib` repository](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md) that uses this release. - -### Website Documentation - -Update [the documentation](./website_docs) for [the OpenTelemetry website](https://opentelemetry.io/docs/go/). -Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate. - -[OpenTelemetry specification]: https://github.com/open-telemetry/opentelemetry-specification diff --git a/vendor/go.opentelemetry.io/otel/VERSIONING.md b/vendor/go.opentelemetry.io/otel/VERSIONING.md deleted file mode 100644 index 412f1e362..000000000 --- a/vendor/go.opentelemetry.io/otel/VERSIONING.md +++ /dev/null @@ -1,224 +0,0 @@ -# Versioning - -This document describes the versioning policy for this repository. This policy -is designed so the following goals can be achieved. - -**Users are provided a codebase of value that is stable and secure.** - -## Policy - -* Versioning of this project will be idiomatic of a Go project using [Go - modules](https://github.com/golang/go/wiki/Modules). - * [Semantic import - versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) - will be used. - * Versions will comply with [semver - 2.0](https://semver.org/spec/v2.0.0.html) with the following exceptions. - * New methods may be added to exported API interfaces. All exported - interfaces that fall within this exception will include the following - paragraph in their public documentation. - - > Warning: methods may be added to this interface in minor releases. - - * If a module is version `v2` or higher, the major version of the module - must be included as a `/vN` at the end of the module paths used in - `go.mod` files (e.g., `module go.opentelemetry.io/otel/v2`, `require - go.opentelemetry.io/otel/v2 v2.0.1`) and in the package import path - (e.g., `import "go.opentelemetry.io/otel/v2/trace"`). This includes the - paths used in `go get` commands (e.g., `go get - go.opentelemetry.io/otel/v2@v2.0.1`. Note there is both a `/v2` and a - `@v2.0.1` in that example. One way to think about it is that the module - name now includes the `/v2`, so include `/v2` whenever you are using the - module name). - * If a module is version `v0` or `v1`, do not include the major version in - either the module path or the import path. - * Modules will be used to encapsulate signals and components. - * Experimental modules still under active development will be versioned at - `v0` to imply the stability guarantee defined by - [semver](https://semver.org/spec/v2.0.0.html#spec-item-4). - - > Major version zero (0.y.z) is for initial development. Anything MAY - > change at any time. The public API SHOULD NOT be considered stable. - - * Mature modules for which we guarantee a stable public API will be versioned - with a major version greater than `v0`. - * The decision to make a module stable will be made on a case-by-case - basis by the maintainers of this project. - * Experimental modules will start their versioning at `v0.0.0` and will - increment their minor version when backwards incompatible changes are - released and increment their patch version when backwards compatible - changes are released. - * All stable modules that use the same major version number will use the - same entire version number. 
- * Stable modules may be released with an incremented minor or patch - version even though that module has not been changed, but rather so - that it will remain at the same version as other stable modules that - did undergo change. - * When an experimental module becomes stable a new stable module version - will be released and will include this now stable module. The new - stable module version will be an increment of the minor version number - and will be applied to all existing stable modules as well as the newly - stable module being released. -* Versioning of the associated [contrib - repository](https://github.com/open-telemetry/opentelemetry-go-contrib) of - this project will be idiomatic of a Go project using [Go - modules](https://github.com/golang/go/wiki/Modules). - * [Semantic import - versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) - will be used. - * Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html). - * If a module is version `v2` or higher, the - major version of the module must be included as a `/vN` at the end of the - module paths used in `go.mod` files (e.g., `module - go.opentelemetry.io/contrib/instrumentation/host/v2`, `require - go.opentelemetry.io/contrib/instrumentation/host/v2 v2.0.1`) and in the - package import path (e.g., `import - "go.opentelemetry.io/contrib/instrumentation/host/v2"`). This includes - the paths used in `go get` commands (e.g., `go get - go.opentelemetry.io/contrib/instrumentation/host/v2@v2.0.1`. Note there - is both a `/v2` and a `@v2.0.1` in that example. One way to think about - it is that the module name now includes the `/v2`, so include `/v2` - whenever you are using the module name). - * If a module is version `v0` or `v1`, do not include the major version - in either the module path or the import path. - * In addition to public APIs, telemetry produced by stable instrumentation - will remain stable and backwards compatible. This is to avoid breaking - alerts and dashboard. - * Modules will be used to encapsulate instrumentation, detectors, exporters, - propagators, and any other independent sets of related components. - * Experimental modules still under active development will be versioned at - `v0` to imply the stability guarantee defined by - [semver](https://semver.org/spec/v2.0.0.html#spec-item-4). - - > Major version zero (0.y.z) is for initial development. Anything MAY - > change at any time. The public API SHOULD NOT be considered stable. - - * Mature modules for which we guarantee a stable public API and telemetry will - be versioned with a major version greater than `v0`. - * Experimental modules will start their versioning at `v0.0.0` and will - increment their minor version when backwards incompatible changes are - released and increment their patch version when backwards compatible - changes are released. - * Stable contrib modules cannot depend on experimental modules from this - project. - * All stable contrib modules of the same major version with this project - will use the same entire version as this project. - * Stable modules may be released with an incremented minor or patch - version even though that module's code has not been changed. Instead - the only change that will have been included is to have updated that - modules dependency on this project's stable APIs. - * When an experimental module in contrib becomes stable a new stable - module version will be released and will include this now stable - module. 
The new stable module version will be an increment of the minor - version number and will be applied to all existing stable contrib - modules, this project's modules, and the newly stable module being - released. - * Contrib modules will be kept up to date with this project's releases. - * Due to the dependency contrib modules will implicitly have on this - project's modules the release of stable contrib modules to match the - released version number will be staggered after this project's release. - There is no explicit time guarantee for how long after this projects - release the contrib release will be. Effort should be made to keep them - as close in time as possible. - * No additional stable release in this project can be made until the - contrib repository has a matching stable release. - * No release can be made in the contrib repository after this project's - stable release except for a stable release of the contrib repository. -* GitHub releases will be made for all releases. -* Go modules will be made available at Go package mirrors. - -## Example Versioning Lifecycle - -To better understand the implementation of the above policy the following -example is provided. This project is simplified to include only the following -modules and their versions: - -* `otel`: `v0.14.0` -* `otel/trace`: `v0.14.0` -* `otel/metric`: `v0.14.0` -* `otel/baggage`: `v0.14.0` -* `otel/sdk/trace`: `v0.14.0` -* `otel/sdk/metric`: `v0.14.0` - -These modules have been developed to a point where the `otel/trace`, -`otel/baggage`, and `otel/sdk/trace` modules have reached a point that they -should be considered for a stable release. The `otel/metric` and -`otel/sdk/metric` are still under active development and the `otel` module -depends on both `otel/trace` and `otel/metric`. - -The `otel` package is refactored to remove its dependencies on `otel/metric` so -it can be released as stable as well. With that done the following release -candidates are made: - -* `otel`: `v1.0.0-RC1` -* `otel/trace`: `v1.0.0-RC1` -* `otel/baggage`: `v1.0.0-RC1` -* `otel/sdk/trace`: `v1.0.0-RC1` - -The `otel/metric` and `otel/sdk/metric` modules remain at `v0.14.0`. - -A few minor issues are discovered in the `otel/trace` package. These issues are -resolved with some minor, but backwards incompatible, changes and are released -as a second release candidate: - -* `otel`: `v1.0.0-RC2` -* `otel/trace`: `v1.0.0-RC2` -* `otel/baggage`: `v1.0.0-RC2` -* `otel/sdk/trace`: `v1.0.0-RC2` - -Notice that all module version numbers are incremented to adhere to our -versioning policy. - -After these release candidates have been evaluated to satisfaction, they are -released as version `v1.0.0`. - -* `otel`: `v1.0.0` -* `otel/trace`: `v1.0.0` -* `otel/baggage`: `v1.0.0` -* `otel/sdk/trace`: `v1.0.0` - -Since both the `go` utility and the Go module system support [the semantic -versioning definition of -precedence](https://semver.org/spec/v2.0.0.html#spec-item-11), this release -will correctly be interpreted as the successor to the previous release -candidates. - -Active development of this project continues. The `otel/metric` module now has -backwards incompatible changes to its API that need to be released and the -`otel/baggage` module has a minor bug fix that needs to be released. 
The -following release is made: - -* `otel`: `v1.0.1` -* `otel/trace`: `v1.0.1` -* `otel/metric`: `v0.15.0` -* `otel/baggage`: `v1.0.1` -* `otel/sdk/trace`: `v1.0.1` -* `otel/sdk/metric`: `v0.15.0` - -Notice that, again, all stable module versions are incremented in unison and -the `otel/sdk/metric` package, which depends on the `otel/metric` package, also -bumped its version. This bump of the `otel/sdk/metric` package makes sense -given their coupling, though it is not explicitly required by our versioning -policy. - -As we progress, the `otel/metric` and `otel/sdk/metric` packages have reached a -point where they should be evaluated for stability. The `otel` module is -reintegrated with the `otel/metric` package and the following release is made: - -* `otel`: `v1.1.0-RC1` -* `otel/trace`: `v1.1.0-RC1` -* `otel/metric`: `v1.1.0-RC1` -* `otel/baggage`: `v1.1.0-RC1` -* `otel/sdk/trace`: `v1.1.0-RC1` -* `otel/sdk/metric`: `v1.1.0-RC1` - -All the modules are evaluated and determined to a viable stable release. They -are then released as version `v1.1.0` (the minor version is incremented to -indicate the addition of new signal). - -* `otel`: `v1.1.0` -* `otel/trace`: `v1.1.0` -* `otel/metric`: `v1.1.0` -* `otel/baggage`: `v1.1.0` -* `otel/sdk/trace`: `v1.1.0` -* `otel/sdk/metric`: `v1.1.0` diff --git a/vendor/go.opentelemetry.io/otel/attribute/doc.go b/vendor/go.opentelemetry.io/otel/attribute/doc.go deleted file mode 100644 index dafe7424d..000000000 --- a/vendor/go.opentelemetry.io/otel/attribute/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package attribute provides key and value attributes. -package attribute // import "go.opentelemetry.io/otel/attribute" diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go deleted file mode 100644 index fe2bc5766..000000000 --- a/vendor/go.opentelemetry.io/otel/attribute/encoder.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package attribute // import "go.opentelemetry.io/otel/attribute" - -import ( - "bytes" - "sync" - "sync/atomic" -) - -type ( - // Encoder is a mechanism for serializing an attribute set into a specific - // string representation that supports caching, to avoid repeated - // serialization. An example could be an exporter encoding the attribute - // set into a wire representation. 
- Encoder interface { - // Encode returns the serialized encoding of the attribute set using - // its Iterator. This result may be cached by a attribute.Set. - Encode(iterator Iterator) string - - // ID returns a value that is unique for each class of attribute - // encoder. Attribute encoders allocate these using `NewEncoderID`. - ID() EncoderID - } - - // EncoderID is used to identify distinct Encoder - // implementations, for caching encoded results. - EncoderID struct { - value uint64 - } - - // defaultAttrEncoder uses a sync.Pool of buffers to reduce the number of - // allocations used in encoding attributes. This implementation encodes a - // comma-separated list of key=value, with '/'-escaping of '=', ',', and - // '\'. - defaultAttrEncoder struct { - // pool is a pool of attribute set builders. The buffers in this pool - // grow to a size that most attribute encodings will not allocate new - // memory. - pool sync.Pool // *bytes.Buffer - } -) - -// escapeChar is used to ensure uniqueness of the attribute encoding where -// keys or values contain either '=' or ','. Since there is no parser needed -// for this encoding and its only requirement is to be unique, this choice is -// arbitrary. Users will see these in some exporters (e.g., stdout), so the -// backslash ('\') is used as a conventional choice. -const escapeChar = '\\' - -var ( - _ Encoder = &defaultAttrEncoder{} - - // encoderIDCounter is for generating IDs for other attribute encoders. - encoderIDCounter uint64 - - defaultEncoderOnce sync.Once - defaultEncoderID = NewEncoderID() - defaultEncoderInstance *defaultAttrEncoder -) - -// NewEncoderID returns a unique attribute encoder ID. It should be called -// once per each type of attribute encoder. Preferably in init() or in var -// definition. -func NewEncoderID() EncoderID { - return EncoderID{value: atomic.AddUint64(&encoderIDCounter, 1)} -} - -// DefaultEncoder returns an attribute encoder that encodes attributes in such -// a way that each escaped attribute's key is followed by an equal sign and -// then by an escaped attribute's value. All key-value pairs are separated by -// a comma. -// -// Escaping is done by prepending a backslash before either a backslash, equal -// sign or a comma. -func DefaultEncoder() Encoder { - defaultEncoderOnce.Do(func() { - defaultEncoderInstance = &defaultAttrEncoder{ - pool: sync.Pool{ - New: func() interface{} { - return &bytes.Buffer{} - }, - }, - } - }) - return defaultEncoderInstance -} - -// Encode is a part of an implementation of the AttributeEncoder interface. -func (d *defaultAttrEncoder) Encode(iter Iterator) string { - buf := d.pool.Get().(*bytes.Buffer) - defer d.pool.Put(buf) - buf.Reset() - - for iter.Next() { - i, keyValue := iter.IndexedAttribute() - if i > 0 { - _, _ = buf.WriteRune(',') - } - copyAndEscape(buf, string(keyValue.Key)) - - _, _ = buf.WriteRune('=') - - if keyValue.Value.Type() == STRING { - copyAndEscape(buf, keyValue.Value.AsString()) - } else { - _, _ = buf.WriteString(keyValue.Value.Emit()) - } - } - return buf.String() -} - -// ID is a part of an implementation of the AttributeEncoder interface. -func (*defaultAttrEncoder) ID() EncoderID { - return defaultEncoderID -} - -// copyAndEscape escapes `=`, `,` and its own escape character (`\`), -// making the default encoding unique. 
-func copyAndEscape(buf *bytes.Buffer, val string) { - for _, ch := range val { - switch ch { - case '=', ',', escapeChar: - _, _ = buf.WriteRune(escapeChar) - } - _, _ = buf.WriteRune(ch) - } -} - -// Valid returns true if this encoder ID was allocated by -// `NewEncoderID`. Invalid encoder IDs will not be cached. -func (id EncoderID) Valid() bool { - return id.value != 0 -} diff --git a/vendor/go.opentelemetry.io/otel/attribute/iterator.go b/vendor/go.opentelemetry.io/otel/attribute/iterator.go deleted file mode 100644 index 841b271fb..000000000 --- a/vendor/go.opentelemetry.io/otel/attribute/iterator.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package attribute // import "go.opentelemetry.io/otel/attribute" - -// Iterator allows iterating over the set of attributes in order, sorted by -// key. -type Iterator struct { - storage *Set - idx int -} - -// MergeIterator supports iterating over two sets of attributes while -// eliminating duplicate values from the combined set. The first iterator -// value takes precedence. -type MergeIterator struct { - one oneIterator - two oneIterator - current KeyValue -} - -type oneIterator struct { - iter Iterator - done bool - attr KeyValue -} - -// Next moves the iterator to the next position. Returns false if there are no -// more attributes. -func (i *Iterator) Next() bool { - i.idx++ - return i.idx < i.Len() -} - -// Label returns current KeyValue. Must be called only after Next returns -// true. -// -// Deprecated: Use Attribute instead. -func (i *Iterator) Label() KeyValue { - return i.Attribute() -} - -// Attribute returns the current KeyValue of the Iterator. It must be called -// only after Next returns true. -func (i *Iterator) Attribute() KeyValue { - kv, _ := i.storage.Get(i.idx) - return kv -} - -// IndexedLabel returns current index and attribute. Must be called only -// after Next returns true. -// -// Deprecated: Use IndexedAttribute instead. -func (i *Iterator) IndexedLabel() (int, KeyValue) { - return i.idx, i.Attribute() -} - -// IndexedAttribute returns current index and attribute. Must be called only -// after Next returns true. -func (i *Iterator) IndexedAttribute() (int, KeyValue) { - return i.idx, i.Attribute() -} - -// Len returns a number of attributes in the iterated set. -func (i *Iterator) Len() int { - return i.storage.Len() -} - -// ToSlice is a convenience function that creates a slice of attributes from -// the passed iterator. The iterator is set up to start from the beginning -// before creating the slice. -func (i *Iterator) ToSlice() []KeyValue { - l := i.Len() - if l == 0 { - return nil - } - i.idx = -1 - slice := make([]KeyValue, 0, l) - for i.Next() { - slice = append(slice, i.Attribute()) - } - return slice -} - -// NewMergeIterator returns a MergeIterator for merging two attribute sets. -// Duplicates are resolved by taking the value from the first set. 
-func NewMergeIterator(s1, s2 *Set) MergeIterator { - mi := MergeIterator{ - one: makeOne(s1.Iter()), - two: makeOne(s2.Iter()), - } - return mi -} - -func makeOne(iter Iterator) oneIterator { - oi := oneIterator{ - iter: iter, - } - oi.advance() - return oi -} - -func (oi *oneIterator) advance() { - if oi.done = !oi.iter.Next(); !oi.done { - oi.attr = oi.iter.Attribute() - } -} - -// Next returns true if there is another attribute available. -func (m *MergeIterator) Next() bool { - if m.one.done && m.two.done { - return false - } - if m.one.done { - m.current = m.two.attr - m.two.advance() - return true - } - if m.two.done { - m.current = m.one.attr - m.one.advance() - return true - } - if m.one.attr.Key == m.two.attr.Key { - m.current = m.one.attr // first iterator attribute value wins - m.one.advance() - m.two.advance() - return true - } - if m.one.attr.Key < m.two.attr.Key { - m.current = m.one.attr - m.one.advance() - return true - } - m.current = m.two.attr - m.two.advance() - return true -} - -// Label returns the current value after Next() returns true. -// -// Deprecated: Use Attribute instead. -func (m *MergeIterator) Label() KeyValue { - return m.current -} - -// Attribute returns the current value after Next() returns true. -func (m *MergeIterator) Attribute() KeyValue { - return m.current -} diff --git a/vendor/go.opentelemetry.io/otel/attribute/key.go b/vendor/go.opentelemetry.io/otel/attribute/key.go deleted file mode 100644 index 0656a04e4..000000000 --- a/vendor/go.opentelemetry.io/otel/attribute/key.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package attribute // import "go.opentelemetry.io/otel/attribute" - -// Key represents the key part in key-value pairs. It's a string. The -// allowed character set in the key depends on the use of the key. -type Key string - -// Bool creates a KeyValue instance with a BOOL Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- Bool(name, value). -func (k Key) Bool(v bool) KeyValue { - return KeyValue{ - Key: k, - Value: BoolValue(v), - } -} - -// BoolSlice creates a KeyValue instance with a BOOLSLICE Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- BoolSlice(name, value). -func (k Key) BoolSlice(v []bool) KeyValue { - return KeyValue{ - Key: k, - Value: BoolSliceValue(v), - } -} - -// Int creates a KeyValue instance with an INT64 Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- Int(name, value). -func (k Key) Int(v int) KeyValue { - return KeyValue{ - Key: k, - Value: IntValue(v), - } -} - -// IntSlice creates a KeyValue instance with an INT64SLICE Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- IntSlice(name, value). 
-func (k Key) IntSlice(v []int) KeyValue { - return KeyValue{ - Key: k, - Value: IntSliceValue(v), - } -} - -// Int64 creates a KeyValue instance with an INT64 Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- Int64(name, value). -func (k Key) Int64(v int64) KeyValue { - return KeyValue{ - Key: k, - Value: Int64Value(v), - } -} - -// Int64Slice creates a KeyValue instance with an INT64SLICE Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- Int64Slice(name, value). -func (k Key) Int64Slice(v []int64) KeyValue { - return KeyValue{ - Key: k, - Value: Int64SliceValue(v), - } -} - -// Float64 creates a KeyValue instance with a FLOAT64 Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- Float64(name, value). -func (k Key) Float64(v float64) KeyValue { - return KeyValue{ - Key: k, - Value: Float64Value(v), - } -} - -// Float64Slice creates a KeyValue instance with a FLOAT64SLICE Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- Float64(name, value). -func (k Key) Float64Slice(v []float64) KeyValue { - return KeyValue{ - Key: k, - Value: Float64SliceValue(v), - } -} - -// String creates a KeyValue instance with a STRING Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- String(name, value). -func (k Key) String(v string) KeyValue { - return KeyValue{ - Key: k, - Value: StringValue(v), - } -} - -// StringSlice creates a KeyValue instance with a STRINGSLICE Value. -// -// If creating both a key and value at the same time, use the provided -// convenience function instead -- StringSlice(name, value). -func (k Key) StringSlice(v []string) KeyValue { - return KeyValue{ - Key: k, - Value: StringSliceValue(v), - } -} - -// Defined returns true for non-empty keys. -func (k Key) Defined() bool { - return len(k) != 0 -} diff --git a/vendor/go.opentelemetry.io/otel/attribute/kv.go b/vendor/go.opentelemetry.io/otel/attribute/kv.go deleted file mode 100644 index 1ddf3ce05..000000000 --- a/vendor/go.opentelemetry.io/otel/attribute/kv.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package attribute // import "go.opentelemetry.io/otel/attribute" - -import ( - "fmt" -) - -// KeyValue holds a key and value pair. -type KeyValue struct { - Key Key - Value Value -} - -// Valid returns if kv is a valid OpenTelemetry attribute. -func (kv KeyValue) Valid() bool { - return kv.Key.Defined() && kv.Value.Type() != INVALID -} - -// Bool creates a KeyValue with a BOOL Value type. -func Bool(k string, v bool) KeyValue { - return Key(k).Bool(v) -} - -// BoolSlice creates a KeyValue with a BOOLSLICE Value type. 
-func BoolSlice(k string, v []bool) KeyValue { - return Key(k).BoolSlice(v) -} - -// Int creates a KeyValue with an INT64 Value type. -func Int(k string, v int) KeyValue { - return Key(k).Int(v) -} - -// IntSlice creates a KeyValue with an INT64SLICE Value type. -func IntSlice(k string, v []int) KeyValue { - return Key(k).IntSlice(v) -} - -// Int64 creates a KeyValue with an INT64 Value type. -func Int64(k string, v int64) KeyValue { - return Key(k).Int64(v) -} - -// Int64Slice creates a KeyValue with an INT64SLICE Value type. -func Int64Slice(k string, v []int64) KeyValue { - return Key(k).Int64Slice(v) -} - -// Float64 creates a KeyValue with a FLOAT64 Value type. -func Float64(k string, v float64) KeyValue { - return Key(k).Float64(v) -} - -// Float64Slice creates a KeyValue with a FLOAT64SLICE Value type. -func Float64Slice(k string, v []float64) KeyValue { - return Key(k).Float64Slice(v) -} - -// String creates a KeyValue with a STRING Value type. -func String(k, v string) KeyValue { - return Key(k).String(v) -} - -// StringSlice creates a KeyValue with a STRINGSLICE Value type. -func StringSlice(k string, v []string) KeyValue { - return Key(k).StringSlice(v) -} - -// Stringer creates a new key-value pair with a passed name and a string -// value generated by the passed Stringer interface. -func Stringer(k string, v fmt.Stringer) KeyValue { - return Key(k).String(v.String()) -} diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go deleted file mode 100644 index 26be59832..000000000 --- a/vendor/go.opentelemetry.io/otel/attribute/set.go +++ /dev/null @@ -1,424 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package attribute // import "go.opentelemetry.io/otel/attribute" - -import ( - "encoding/json" - "reflect" - "sort" -) - -type ( - // Set is the representation for a distinct attribute set. It manages an - // immutable set of attributes, with an internal cache for storing - // attribute encodings. - // - // This type supports the Equivalent method of comparison using values of - // type Distinct. - Set struct { - equivalent Distinct - } - - // Distinct wraps a variable-size array of KeyValue, constructed with keys - // in sorted order. This can be used as a map key or for equality checking - // between Sets. - Distinct struct { - iface interface{} - } - - // Filter supports removing certain attributes from attribute sets. When - // the filter returns true, the attribute will be kept in the filtered - // attribute set. When the filter returns false, the attribute is excluded - // from the filtered attribute set, and the attribute instead appears in - // the removed list of excluded attributes. - Filter func(KeyValue) bool - - // Sortable implements sort.Interface, used for sorting KeyValue. This is - // an exported type to support a memory optimization. 
A pointer to one of - // these is needed for the call to sort.Stable(), which the caller may - // provide in order to avoid an allocation. See NewSetWithSortable(). - Sortable []KeyValue -) - -var ( - // keyValueType is used in computeDistinctReflect. - keyValueType = reflect.TypeOf(KeyValue{}) - - // emptySet is returned for empty attribute sets. - emptySet = &Set{ - equivalent: Distinct{ - iface: [0]KeyValue{}, - }, - } -) - -// EmptySet returns a reference to a Set with no elements. -// -// This is a convenience provided for optimized calling utility. -func EmptySet() *Set { - return emptySet -} - -// reflectValue abbreviates reflect.ValueOf(d). -func (d Distinct) reflectValue() reflect.Value { - return reflect.ValueOf(d.iface) -} - -// Valid returns true if this value refers to a valid Set. -func (d Distinct) Valid() bool { - return d.iface != nil -} - -// Len returns the number of attributes in this set. -func (l *Set) Len() int { - if l == nil || !l.equivalent.Valid() { - return 0 - } - return l.equivalent.reflectValue().Len() -} - -// Get returns the KeyValue at ordered position idx in this set. -func (l *Set) Get(idx int) (KeyValue, bool) { - if l == nil { - return KeyValue{}, false - } - value := l.equivalent.reflectValue() - - if idx >= 0 && idx < value.Len() { - // Note: The Go compiler successfully avoids an allocation for - // the interface{} conversion here: - return value.Index(idx).Interface().(KeyValue), true - } - - return KeyValue{}, false -} - -// Value returns the value of a specified key in this set. -func (l *Set) Value(k Key) (Value, bool) { - if l == nil { - return Value{}, false - } - rValue := l.equivalent.reflectValue() - vlen := rValue.Len() - - idx := sort.Search(vlen, func(idx int) bool { - return rValue.Index(idx).Interface().(KeyValue).Key >= k - }) - if idx >= vlen { - return Value{}, false - } - keyValue := rValue.Index(idx).Interface().(KeyValue) - if k == keyValue.Key { - return keyValue.Value, true - } - return Value{}, false -} - -// HasValue tests whether a key is defined in this set. -func (l *Set) HasValue(k Key) bool { - if l == nil { - return false - } - _, ok := l.Value(k) - return ok -} - -// Iter returns an iterator for visiting the attributes in this set. -func (l *Set) Iter() Iterator { - return Iterator{ - storage: l, - idx: -1, - } -} - -// ToSlice returns the set of attributes belonging to this set, sorted, where -// keys appear no more than once. -func (l *Set) ToSlice() []KeyValue { - iter := l.Iter() - return iter.ToSlice() -} - -// Equivalent returns a value that may be used as a map key. The Distinct type -// guarantees that the result will equal the equivalent. Distinct value of any -// attribute set with the same elements as this, where sets are made unique by -// choosing the last value in the input for any given key. -func (l *Set) Equivalent() Distinct { - if l == nil || !l.equivalent.Valid() { - return emptySet.equivalent - } - return l.equivalent -} - -// Equals returns true if the argument set is equivalent to this set. -func (l *Set) Equals(o *Set) bool { - return l.Equivalent() == o.Equivalent() -} - -// Encoded returns the encoded form of this set, according to encoder. -func (l *Set) Encoded(encoder Encoder) string { - if l == nil || encoder == nil { - return "" - } - - return encoder.Encode(l.Iter()) -} - -func empty() Set { - return Set{ - equivalent: emptySet.equivalent, - } -} - -// NewSet returns a new Set. See the documentation for -// NewSetWithSortableFiltered for more details. 
-// -// Except for empty sets, this method adds an additional allocation compared -// with calls that include a Sortable. -func NewSet(kvs ...KeyValue) Set { - // Check for empty set. - if len(kvs) == 0 { - return empty() - } - s, _ := NewSetWithSortableFiltered(kvs, new(Sortable), nil) - return s -} - -// NewSetWithSortable returns a new Set. See the documentation for -// NewSetWithSortableFiltered for more details. -// -// This call includes a Sortable option as a memory optimization. -func NewSetWithSortable(kvs []KeyValue, tmp *Sortable) Set { - // Check for empty set. - if len(kvs) == 0 { - return empty() - } - s, _ := NewSetWithSortableFiltered(kvs, tmp, nil) - return s -} - -// NewSetWithFiltered returns a new Set. See the documentation for -// NewSetWithSortableFiltered for more details. -// -// This call includes a Filter to include/exclude attribute keys from the -// return value. Excluded keys are returned as a slice of attribute values. -func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { - // Check for empty set. - if len(kvs) == 0 { - return empty(), nil - } - return NewSetWithSortableFiltered(kvs, new(Sortable), filter) -} - -// NewSetWithSortableFiltered returns a new Set. -// -// Duplicate keys are eliminated by taking the last value. This -// re-orders the input slice so that unique last-values are contiguous -// at the end of the slice. -// -// This ensures the following: -// -// - Last-value-wins semantics -// - Caller sees the reordering, but doesn't lose values -// - Repeated call preserve last-value wins. -// -// Note that methods are defined on Set, although this returns Set. Callers -// can avoid memory allocations by: -// -// - allocating a Sortable for use as a temporary in this method -// - allocating a Set for storing the return value of this constructor. -// -// The result maintains a cache of encoded attributes, by attribute.EncoderID. -// This value should not be copied after its first use. -// -// The second []KeyValue return value is a list of attributes that were -// excluded by the Filter (if non-nil). -func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (Set, []KeyValue) { - // Check for empty set. - if len(kvs) == 0 { - return empty(), nil - } - - *tmp = kvs - - // Stable sort so the following de-duplication can implement - // last-value-wins semantics. - sort.Stable(tmp) - - *tmp = nil - - position := len(kvs) - 1 - offset := position - 1 - - // The requirements stated above require that the stable - // result be placed in the end of the input slice, while - // overwritten values are swapped to the beginning. - // - // De-duplicate with last-value-wins semantics. Preserve - // duplicate values at the beginning of the input slice. - for ; offset >= 0; offset-- { - if kvs[offset].Key == kvs[position].Key { - continue - } - position-- - kvs[offset], kvs[position] = kvs[position], kvs[offset] - } - if filter != nil { - return filterSet(kvs[position:], filter) - } - return Set{ - equivalent: computeDistinct(kvs[position:]), - }, nil -} - -// filterSet reorders kvs so that included keys are contiguous at the end of -// the slice, while excluded keys precede the included keys. -func filterSet(kvs []KeyValue, filter Filter) (Set, []KeyValue) { - var excluded []KeyValue - - // Move attributes that do not match the filter so they're adjacent before - // calling computeDistinct(). - distinctPosition := len(kvs) - - // Swap indistinct keys forward and distinct keys toward the - // end of the slice. 
- offset := len(kvs) - 1 - for ; offset >= 0; offset-- { - if filter(kvs[offset]) { - distinctPosition-- - kvs[offset], kvs[distinctPosition] = kvs[distinctPosition], kvs[offset] - continue - } - } - excluded = kvs[:distinctPosition] - - return Set{ - equivalent: computeDistinct(kvs[distinctPosition:]), - }, excluded -} - -// Filter returns a filtered copy of this Set. See the documentation for -// NewSetWithSortableFiltered for more details. -func (l *Set) Filter(re Filter) (Set, []KeyValue) { - if re == nil { - return Set{ - equivalent: l.equivalent, - }, nil - } - - // Note: This could be refactored to avoid the temporary slice - // allocation, if it proves to be expensive. - return filterSet(l.ToSlice(), re) -} - -// computeDistinct returns a Distinct using either the fixed- or -// reflect-oriented code path, depending on the size of the input. The input -// slice is assumed to already be sorted and de-duplicated. -func computeDistinct(kvs []KeyValue) Distinct { - iface := computeDistinctFixed(kvs) - if iface == nil { - iface = computeDistinctReflect(kvs) - } - return Distinct{ - iface: iface, - } -} - -// computeDistinctFixed computes a Distinct for small slices. It returns nil -// if the input is too large for this code path. -func computeDistinctFixed(kvs []KeyValue) interface{} { - switch len(kvs) { - case 1: - ptr := new([1]KeyValue) - copy((*ptr)[:], kvs) - return *ptr - case 2: - ptr := new([2]KeyValue) - copy((*ptr)[:], kvs) - return *ptr - case 3: - ptr := new([3]KeyValue) - copy((*ptr)[:], kvs) - return *ptr - case 4: - ptr := new([4]KeyValue) - copy((*ptr)[:], kvs) - return *ptr - case 5: - ptr := new([5]KeyValue) - copy((*ptr)[:], kvs) - return *ptr - case 6: - ptr := new([6]KeyValue) - copy((*ptr)[:], kvs) - return *ptr - case 7: - ptr := new([7]KeyValue) - copy((*ptr)[:], kvs) - return *ptr - case 8: - ptr := new([8]KeyValue) - copy((*ptr)[:], kvs) - return *ptr - case 9: - ptr := new([9]KeyValue) - copy((*ptr)[:], kvs) - return *ptr - case 10: - ptr := new([10]KeyValue) - copy((*ptr)[:], kvs) - return *ptr - default: - return nil - } -} - -// computeDistinctReflect computes a Distinct using reflection, works for any -// size input. -func computeDistinctReflect(kvs []KeyValue) interface{} { - at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem() - for i, keyValue := range kvs { - *(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue - } - return at.Interface() -} - -// MarshalJSON returns the JSON encoding of the Set. -func (l *Set) MarshalJSON() ([]byte, error) { - return json.Marshal(l.equivalent.iface) -} - -// MarshalLog is the marshaling function used by the logging system to represent this exporter. -func (l Set) MarshalLog() interface{} { - kvs := make(map[string]string) - for _, kv := range l.ToSlice() { - kvs[string(kv.Key)] = kv.Value.Emit() - } - return kvs -} - -// Len implements sort.Interface. -func (l *Sortable) Len() int { - return len(*l) -} - -// Swap implements sort.Interface. -func (l *Sortable) Swap(i, j int) { - (*l)[i], (*l)[j] = (*l)[j], (*l)[i] -} - -// Less implements sort.Interface. -func (l *Sortable) Less(i, j int) bool { - return (*l)[i].Key < (*l)[j].Key -} diff --git a/vendor/go.opentelemetry.io/otel/attribute/type_string.go b/vendor/go.opentelemetry.io/otel/attribute/type_string.go deleted file mode 100644 index e584b2477..000000000 --- a/vendor/go.opentelemetry.io/otel/attribute/type_string.go +++ /dev/null @@ -1,31 +0,0 @@ -// Code generated by "stringer -type=Type"; DO NOT EDIT. 
- -package attribute - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[INVALID-0] - _ = x[BOOL-1] - _ = x[INT64-2] - _ = x[FLOAT64-3] - _ = x[STRING-4] - _ = x[BOOLSLICE-5] - _ = x[INT64SLICE-6] - _ = x[FLOAT64SLICE-7] - _ = x[STRINGSLICE-8] -} - -const _Type_name = "INVALIDBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICESTRINGSLICE" - -var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 38, 48, 60, 71} - -func (i Type) String() string { - if i < 0 || i >= Type(len(_Type_index)-1) { - return "Type(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _Type_name[_Type_index[i]:_Type_index[i+1]] -} diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go deleted file mode 100644 index cb21dd5c0..000000000 --- a/vendor/go.opentelemetry.io/otel/attribute/value.go +++ /dev/null @@ -1,270 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package attribute // import "go.opentelemetry.io/otel/attribute" - -import ( - "encoding/json" - "fmt" - "reflect" - "strconv" - - "go.opentelemetry.io/otel/internal" - "go.opentelemetry.io/otel/internal/attribute" -) - -//go:generate stringer -type=Type - -// Type describes the type of the data Value holds. -type Type int // nolint: revive // redefines builtin Type. - -// Value represents the value part in key-value pairs. -type Value struct { - vtype Type - numeric uint64 - stringly string - slice interface{} -} - -const ( - // INVALID is used for a Value with no value set. - INVALID Type = iota - // BOOL is a boolean Type Value. - BOOL - // INT64 is a 64-bit signed integral Type Value. - INT64 - // FLOAT64 is a 64-bit floating point Type Value. - FLOAT64 - // STRING is a string Type Value. - STRING - // BOOLSLICE is a slice of booleans Type Value. - BOOLSLICE - // INT64SLICE is a slice of 64-bit signed integral numbers Type Value. - INT64SLICE - // FLOAT64SLICE is a slice of 64-bit floating point numbers Type Value. - FLOAT64SLICE - // STRINGSLICE is a slice of strings Type Value. - STRINGSLICE -) - -// BoolValue creates a BOOL Value. -func BoolValue(v bool) Value { - return Value{ - vtype: BOOL, - numeric: internal.BoolToRaw(v), - } -} - -// BoolSliceValue creates a BOOLSLICE Value. -func BoolSliceValue(v []bool) Value { - return Value{vtype: BOOLSLICE, slice: attribute.BoolSliceValue(v)} -} - -// IntValue creates an INT64 Value. -func IntValue(v int) Value { - return Int64Value(int64(v)) -} - -// IntSliceValue creates an INTSLICE Value. -func IntSliceValue(v []int) Value { - var int64Val int64 - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(int64Val))) - for i, val := range v { - cp.Elem().Index(i).SetInt(int64(val)) - } - return Value{ - vtype: INT64SLICE, - slice: cp.Elem().Interface(), - } -} - -// Int64Value creates an INT64 Value. 
-func Int64Value(v int64) Value { - return Value{ - vtype: INT64, - numeric: internal.Int64ToRaw(v), - } -} - -// Int64SliceValue creates an INT64SLICE Value. -func Int64SliceValue(v []int64) Value { - return Value{vtype: INT64SLICE, slice: attribute.Int64SliceValue(v)} -} - -// Float64Value creates a FLOAT64 Value. -func Float64Value(v float64) Value { - return Value{ - vtype: FLOAT64, - numeric: internal.Float64ToRaw(v), - } -} - -// Float64SliceValue creates a FLOAT64SLICE Value. -func Float64SliceValue(v []float64) Value { - return Value{vtype: FLOAT64SLICE, slice: attribute.Float64SliceValue(v)} -} - -// StringValue creates a STRING Value. -func StringValue(v string) Value { - return Value{ - vtype: STRING, - stringly: v, - } -} - -// StringSliceValue creates a STRINGSLICE Value. -func StringSliceValue(v []string) Value { - return Value{vtype: STRINGSLICE, slice: attribute.StringSliceValue(v)} -} - -// Type returns a type of the Value. -func (v Value) Type() Type { - return v.vtype -} - -// AsBool returns the bool value. Make sure that the Value's type is -// BOOL. -func (v Value) AsBool() bool { - return internal.RawToBool(v.numeric) -} - -// AsBoolSlice returns the []bool value. Make sure that the Value's type is -// BOOLSLICE. -func (v Value) AsBoolSlice() []bool { - if v.vtype != BOOLSLICE { - return nil - } - return v.asBoolSlice() -} - -func (v Value) asBoolSlice() []bool { - return attribute.AsBoolSlice(v.slice) -} - -// AsInt64 returns the int64 value. Make sure that the Value's type is -// INT64. -func (v Value) AsInt64() int64 { - return internal.RawToInt64(v.numeric) -} - -// AsInt64Slice returns the []int64 value. Make sure that the Value's type is -// INT64SLICE. -func (v Value) AsInt64Slice() []int64 { - if v.vtype != INT64SLICE { - return nil - } - return v.asInt64Slice() -} - -func (v Value) asInt64Slice() []int64 { - return attribute.AsInt64Slice(v.slice) -} - -// AsFloat64 returns the float64 value. Make sure that the Value's -// type is FLOAT64. -func (v Value) AsFloat64() float64 { - return internal.RawToFloat64(v.numeric) -} - -// AsFloat64Slice returns the []float64 value. Make sure that the Value's type is -// FLOAT64SLICE. -func (v Value) AsFloat64Slice() []float64 { - if v.vtype != FLOAT64SLICE { - return nil - } - return v.asFloat64Slice() -} - -func (v Value) asFloat64Slice() []float64 { - return attribute.AsFloat64Slice(v.slice) -} - -// AsString returns the string value. Make sure that the Value's type -// is STRING. -func (v Value) AsString() string { - return v.stringly -} - -// AsStringSlice returns the []string value. Make sure that the Value's type is -// STRINGSLICE. -func (v Value) AsStringSlice() []string { - if v.vtype != STRINGSLICE { - return nil - } - return v.asStringSlice() -} - -func (v Value) asStringSlice() []string { - return attribute.AsStringSlice(v.slice) -} - -type unknownValueType struct{} - -// AsInterface returns Value's data as interface{}. -func (v Value) AsInterface() interface{} { - switch v.Type() { - case BOOL: - return v.AsBool() - case BOOLSLICE: - return v.asBoolSlice() - case INT64: - return v.AsInt64() - case INT64SLICE: - return v.asInt64Slice() - case FLOAT64: - return v.AsFloat64() - case FLOAT64SLICE: - return v.asFloat64Slice() - case STRING: - return v.stringly - case STRINGSLICE: - return v.asStringSlice() - } - return unknownValueType{} -} - -// Emit returns a string representation of Value's data. 
-func (v Value) Emit() string { - switch v.Type() { - case BOOLSLICE: - return fmt.Sprint(v.asBoolSlice()) - case BOOL: - return strconv.FormatBool(v.AsBool()) - case INT64SLICE: - return fmt.Sprint(v.asInt64Slice()) - case INT64: - return strconv.FormatInt(v.AsInt64(), 10) - case FLOAT64SLICE: - return fmt.Sprint(v.asFloat64Slice()) - case FLOAT64: - return fmt.Sprint(v.AsFloat64()) - case STRINGSLICE: - return fmt.Sprint(v.asStringSlice()) - case STRING: - return v.stringly - default: - return "unknown" - } -} - -// MarshalJSON returns the JSON encoding of the Value. -func (v Value) MarshalJSON() ([]byte, error) { - var jsonVal struct { - Type string - Value interface{} - } - jsonVal.Type = v.Type().String() - jsonVal.Value = v.AsInterface() - return json.Marshal(jsonVal) -} diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go deleted file mode 100644 index a36db8f8d..000000000 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ /dev/null @@ -1,570 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package baggage // import "go.opentelemetry.io/otel/baggage" - -import ( - "errors" - "fmt" - "net/url" - "regexp" - "strings" - - "go.opentelemetry.io/otel/internal/baggage" -) - -const ( - maxMembers = 180 - maxBytesPerMembers = 4096 - maxBytesPerBaggageString = 8192 - - listDelimiter = "," - keyValueDelimiter = "=" - propertyDelimiter = ";" - - keyDef = `([\x21\x23-\x27\x2A\x2B\x2D\x2E\x30-\x39\x41-\x5a\x5e-\x7a\x7c\x7e]+)` - valueDef = `([\x21\x23-\x2b\x2d-\x3a\x3c-\x5B\x5D-\x7e]*)` - keyValueDef = `\s*` + keyDef + `\s*` + keyValueDelimiter + `\s*` + valueDef + `\s*` -) - -var ( - keyRe = regexp.MustCompile(`^` + keyDef + `$`) - valueRe = regexp.MustCompile(`^` + valueDef + `$`) - propertyRe = regexp.MustCompile(`^(?:\s*` + keyDef + `\s*|` + keyValueDef + `)$`) -) - -var ( - errInvalidKey = errors.New("invalid key") - errInvalidValue = errors.New("invalid value") - errInvalidProperty = errors.New("invalid baggage list-member property") - errInvalidMember = errors.New("invalid baggage list-member") - errMemberNumber = errors.New("too many list-members in baggage-string") - errMemberBytes = errors.New("list-member too large") - errBaggageBytes = errors.New("baggage-string too large") -) - -// Property is an additional metadata entry for a baggage list-member. -type Property struct { - key, value string - - // hasValue indicates if a zero-value value means the property does not - // have a value or if it was the zero-value. - hasValue bool - - // hasData indicates whether the created property contains data or not. - // Properties that do not contain data are invalid with no other check - // required. - hasData bool -} - -// NewKeyProperty returns a new Property for key. -// -// If key is invalid, an error will be returned. 
-func NewKeyProperty(key string) (Property, error) { - if !keyRe.MatchString(key) { - return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) - } - - p := Property{key: key, hasData: true} - return p, nil -} - -// NewKeyValueProperty returns a new Property for key with value. -// -// If key or value are invalid, an error will be returned. -func NewKeyValueProperty(key, value string) (Property, error) { - if !keyRe.MatchString(key) { - return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) - } - if !valueRe.MatchString(value) { - return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) - } - - p := Property{ - key: key, - value: value, - hasValue: true, - hasData: true, - } - return p, nil -} - -func newInvalidProperty() Property { - return Property{} -} - -// parseProperty attempts to decode a Property from the passed string. It -// returns an error if the input is invalid according to the W3C Baggage -// specification. -func parseProperty(property string) (Property, error) { - if property == "" { - return newInvalidProperty(), nil - } - - match := propertyRe.FindStringSubmatch(property) - if len(match) != 4 { - return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidProperty, property) - } - - p := Property{hasData: true} - if match[1] != "" { - p.key = match[1] - } else { - p.key = match[2] - p.value = match[3] - p.hasValue = true - } - - return p, nil -} - -// validate ensures p conforms to the W3C Baggage specification, returning an -// error otherwise. -func (p Property) validate() error { - errFunc := func(err error) error { - return fmt.Errorf("invalid property: %w", err) - } - - if !p.hasData { - return errFunc(fmt.Errorf("%w: %q", errInvalidProperty, p)) - } - - if !keyRe.MatchString(p.key) { - return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key)) - } - if p.hasValue && !valueRe.MatchString(p.value) { - return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value)) - } - if !p.hasValue && p.value != "" { - return errFunc(errors.New("inconsistent value")) - } - return nil -} - -// Key returns the Property key. -func (p Property) Key() string { - return p.key -} - -// Value returns the Property value. Additionally, a boolean value is returned -// indicating if the returned value is the empty if the Property has a value -// that is empty or if the value is not set. -func (p Property) Value() (string, bool) { - return p.value, p.hasValue -} - -// String encodes Property into a string compliant with the W3C Baggage -// specification. 
-func (p Property) String() string { - if p.hasValue { - return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, p.value) - } - return p.key -} - -type properties []Property - -func fromInternalProperties(iProps []baggage.Property) properties { - if len(iProps) == 0 { - return nil - } - - props := make(properties, len(iProps)) - for i, p := range iProps { - props[i] = Property{ - key: p.Key, - value: p.Value, - hasValue: p.HasValue, - } - } - return props -} - -func (p properties) asInternal() []baggage.Property { - if len(p) == 0 { - return nil - } - - iProps := make([]baggage.Property, len(p)) - for i, prop := range p { - iProps[i] = baggage.Property{ - Key: prop.key, - Value: prop.value, - HasValue: prop.hasValue, - } - } - return iProps -} - -func (p properties) Copy() properties { - if len(p) == 0 { - return nil - } - - props := make(properties, len(p)) - copy(props, p) - return props -} - -// validate ensures each Property in p conforms to the W3C Baggage -// specification, returning an error otherwise. -func (p properties) validate() error { - for _, prop := range p { - if err := prop.validate(); err != nil { - return err - } - } - return nil -} - -// String encodes properties into a string compliant with the W3C Baggage -// specification. -func (p properties) String() string { - props := make([]string, len(p)) - for i, prop := range p { - props[i] = prop.String() - } - return strings.Join(props, propertyDelimiter) -} - -// Member is a list-member of a baggage-string as defined by the W3C Baggage -// specification. -type Member struct { - key, value string - properties properties - - // hasData indicates whether the created property contains data or not. - // Properties that do not contain data are invalid with no other check - // required. - hasData bool -} - -// NewMember returns a new Member from the passed arguments. The key will be -// used directly while the value will be url decoded after validation. An error -// is returned if the created Member would be invalid according to the W3C -// Baggage specification. -func NewMember(key, value string, props ...Property) (Member, error) { - m := Member{ - key: key, - value: value, - properties: properties(props).Copy(), - hasData: true, - } - if err := m.validate(); err != nil { - return newInvalidMember(), err - } - decodedValue, err := url.QueryUnescape(value) - if err != nil { - return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) - } - m.value = decodedValue - return m, nil -} - -func newInvalidMember() Member { - return Member{} -} - -// parseMember attempts to decode a Member from the passed string. It returns -// an error if the input is invalid according to the W3C Baggage -// specification. -func parseMember(member string) (Member, error) { - if n := len(member); n > maxBytesPerMembers { - return newInvalidMember(), fmt.Errorf("%w: %d", errMemberBytes, n) - } - - var ( - key, value string - props properties - ) - - parts := strings.SplitN(member, propertyDelimiter, 2) - switch len(parts) { - case 2: - // Parse the member properties. - for _, pStr := range strings.Split(parts[1], propertyDelimiter) { - p, err := parseProperty(pStr) - if err != nil { - return newInvalidMember(), err - } - props = append(props, p) - } - fallthrough - case 1: - // Parse the member key/value pair. - - // Take into account a value can contain equal signs (=). 
- kv := strings.SplitN(parts[0], keyValueDelimiter, 2) - if len(kv) != 2 { - return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidMember, member) - } - // "Leading and trailing whitespaces are allowed but MUST be trimmed - // when converting the header into a data structure." - key = strings.TrimSpace(kv[0]) - var err error - value, err = url.QueryUnescape(strings.TrimSpace(kv[1])) - if err != nil { - return newInvalidMember(), fmt.Errorf("%w: %q", err, value) - } - if !keyRe.MatchString(key) { - return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) - } - if !valueRe.MatchString(value) { - return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) - } - default: - // This should never happen unless a developer has changed the string - // splitting somehow. Panic instead of failing silently and allowing - // the bug to slip past the CI checks. - panic("failed to parse baggage member") - } - - return Member{key: key, value: value, properties: props, hasData: true}, nil -} - -// validate ensures m conforms to the W3C Baggage specification. -// A key is just an ASCII string, but a value must be URL encoded UTF-8, -// returning an error otherwise. -func (m Member) validate() error { - if !m.hasData { - return fmt.Errorf("%w: %q", errInvalidMember, m) - } - - if !keyRe.MatchString(m.key) { - return fmt.Errorf("%w: %q", errInvalidKey, m.key) - } - if !valueRe.MatchString(m.value) { - return fmt.Errorf("%w: %q", errInvalidValue, m.value) - } - return m.properties.validate() -} - -// Key returns the Member key. -func (m Member) Key() string { return m.key } - -// Value returns the Member value. -func (m Member) Value() string { return m.value } - -// Properties returns a copy of the Member properties. -func (m Member) Properties() []Property { return m.properties.Copy() } - -// String encodes Member into a string compliant with the W3C Baggage -// specification. -func (m Member) String() string { - // A key is just an ASCII string, but a value is URL encoded UTF-8. - s := fmt.Sprintf("%s%s%s", m.key, keyValueDelimiter, url.QueryEscape(m.value)) - if len(m.properties) > 0 { - s = fmt.Sprintf("%s%s%s", s, propertyDelimiter, m.properties.String()) - } - return s -} - -// Baggage is a list of baggage members representing the baggage-string as -// defined by the W3C Baggage specification. -type Baggage struct { //nolint:golint - list baggage.List -} - -// New returns a new valid Baggage. It returns an error if it results in a -// Baggage exceeding limits set in that specification. -// -// It expects all the provided members to have already been validated. -func New(members ...Member) (Baggage, error) { - if len(members) == 0 { - return Baggage{}, nil - } - - b := make(baggage.List) - for _, m := range members { - if !m.hasData { - return Baggage{}, errInvalidMember - } - - // OpenTelemetry resolves duplicates by last-one-wins. - b[m.key] = baggage.Item{ - Value: m.value, - Properties: m.properties.asInternal(), - } - } - - // Check member numbers after deduplication. - if len(b) > maxMembers { - return Baggage{}, errMemberNumber - } - - bag := Baggage{b} - if n := len(bag.String()); n > maxBytesPerBaggageString { - return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n) - } - - return bag, nil -} - -// Parse attempts to decode a baggage-string from the passed string. It -// returns an error if the input is invalid according to the W3C Baggage -// specification. 
-// -// If there are duplicate list-members contained in baggage, the last one -// defined (reading left-to-right) will be the only one kept. This diverges -// from the W3C Baggage specification which allows duplicate list-members, but -// conforms to the OpenTelemetry Baggage specification. -func Parse(bStr string) (Baggage, error) { - if bStr == "" { - return Baggage{}, nil - } - - if n := len(bStr); n > maxBytesPerBaggageString { - return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n) - } - - b := make(baggage.List) - for _, memberStr := range strings.Split(bStr, listDelimiter) { - m, err := parseMember(memberStr) - if err != nil { - return Baggage{}, err - } - // OpenTelemetry resolves duplicates by last-one-wins. - b[m.key] = baggage.Item{ - Value: m.value, - Properties: m.properties.asInternal(), - } - } - - // OpenTelemetry does not allow for duplicate list-members, but the W3C - // specification does. Now that we have deduplicated, ensure the baggage - // does not exceed list-member limits. - if len(b) > maxMembers { - return Baggage{}, errMemberNumber - } - - return Baggage{b}, nil -} - -// Member returns the baggage list-member identified by key. -// -// If there is no list-member matching the passed key the returned Member will -// be a zero-value Member. -// The returned member is not validated, as we assume the validation happened -// when it was added to the Baggage. -func (b Baggage) Member(key string) Member { - v, ok := b.list[key] - if !ok { - // We do not need to worry about distinguishing between the situation - // where a zero-valued Member is included in the Baggage because a - // zero-valued Member is invalid according to the W3C Baggage - // specification (it has an empty key). - return newInvalidMember() - } - - return Member{ - key: key, - value: v.Value, - properties: fromInternalProperties(v.Properties), - hasData: true, - } -} - -// Members returns all the baggage list-members. -// The order of the returned list-members does not have significance. -// -// The returned members are not validated, as we assume the validation happened -// when they were added to the Baggage. -func (b Baggage) Members() []Member { - if len(b.list) == 0 { - return nil - } - - members := make([]Member, 0, len(b.list)) - for k, v := range b.list { - members = append(members, Member{ - key: k, - value: v.Value, - properties: fromInternalProperties(v.Properties), - hasData: true, - }) - } - return members -} - -// SetMember returns a copy the Baggage with the member included. If the -// baggage contains a Member with the same key the existing Member is -// replaced. -// -// If member is invalid according to the W3C Baggage specification, an error -// is returned with the original Baggage. -func (b Baggage) SetMember(member Member) (Baggage, error) { - if !member.hasData { - return b, errInvalidMember - } - - n := len(b.list) - if _, ok := b.list[member.key]; !ok { - n++ - } - list := make(baggage.List, n) - - for k, v := range b.list { - // Do not copy if we are just going to overwrite. - if k == member.key { - continue - } - list[k] = v - } - - list[member.key] = baggage.Item{ - Value: member.value, - Properties: member.properties.asInternal(), - } - - return Baggage{list: list}, nil -} - -// DeleteMember returns a copy of the Baggage with the list-member identified -// by key removed. 
-func (b Baggage) DeleteMember(key string) Baggage { - n := len(b.list) - if _, ok := b.list[key]; ok { - n-- - } - list := make(baggage.List, n) - - for k, v := range b.list { - if k == key { - continue - } - list[k] = v - } - - return Baggage{list: list} -} - -// Len returns the number of list-members in the Baggage. -func (b Baggage) Len() int { - return len(b.list) -} - -// String encodes Baggage into a string compliant with the W3C Baggage -// specification. The returned string will be invalid if the Baggage contains -// any invalid list-members. -func (b Baggage) String() string { - members := make([]string, 0, len(b.list)) - for k, v := range b.list { - members = append(members, Member{ - key: k, - value: v.Value, - properties: fromInternalProperties(v.Properties), - }.String()) - } - return strings.Join(members, listDelimiter) -} diff --git a/vendor/go.opentelemetry.io/otel/baggage/context.go b/vendor/go.opentelemetry.io/otel/baggage/context.go deleted file mode 100644 index 24b34b756..000000000 --- a/vendor/go.opentelemetry.io/otel/baggage/context.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package baggage // import "go.opentelemetry.io/otel/baggage" - -import ( - "context" - - "go.opentelemetry.io/otel/internal/baggage" -) - -// ContextWithBaggage returns a copy of parent with baggage. -func ContextWithBaggage(parent context.Context, b Baggage) context.Context { - // Delegate so any hooks for the OpenTracing bridge are handled. - return baggage.ContextWithList(parent, b.list) -} - -// ContextWithoutBaggage returns a copy of parent with no baggage. -func ContextWithoutBaggage(parent context.Context) context.Context { - // Delegate so any hooks for the OpenTracing bridge are handled. - return baggage.ContextWithList(parent, nil) -} - -// FromContext returns the baggage contained in ctx. -func FromContext(ctx context.Context) Baggage { - // Delegate so any hooks for the OpenTracing bridge are handled. - return Baggage{list: baggage.ListFromContext(ctx)} -} diff --git a/vendor/go.opentelemetry.io/otel/baggage/doc.go b/vendor/go.opentelemetry.io/otel/baggage/doc.go deleted file mode 100644 index 4545100df..000000000 --- a/vendor/go.opentelemetry.io/otel/baggage/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package baggage provides functionality for storing and retrieving -baggage items in Go context. 
For propagating the baggage, see the -go.opentelemetry.io/otel/propagation package. -*/ -package baggage // import "go.opentelemetry.io/otel/baggage" diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go deleted file mode 100644 index 587ebae4e..000000000 --- a/vendor/go.opentelemetry.io/otel/codes/codes.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package codes // import "go.opentelemetry.io/otel/codes" - -import ( - "encoding/json" - "fmt" - "strconv" -) - -const ( - // Unset is the default status code. - Unset Code = 0 - - // Error indicates the operation contains an error. - // - // NOTE: The error code in OTLP is 2. - // The value of this enum is only relevant to the internals - // of the Go SDK. - Error Code = 1 - - // Ok indicates operation has been validated by an Application developers - // or Operator to have completed successfully, or contain no error. - // - // NOTE: The Ok code in OTLP is 1. - // The value of this enum is only relevant to the internals - // of the Go SDK. - Ok Code = 2 - - maxCode = 3 -) - -// Code is an 32-bit representation of a status state. -type Code uint32 - -var codeToStr = map[Code]string{ - Unset: "Unset", - Error: "Error", - Ok: "Ok", -} - -var strToCode = map[string]Code{ - `"Unset"`: Unset, - `"Error"`: Error, - `"Ok"`: Ok, -} - -// String returns the Code as a string. -func (c Code) String() string { - return codeToStr[c] -} - -// UnmarshalJSON unmarshals b into the Code. -// -// This is based on the functionality in the gRPC codes package: -// https://github.com/grpc/grpc-go/blob/bb64fee312b46ebee26be43364a7a966033521b1/codes/codes.go#L218-L244 -func (c *Code) UnmarshalJSON(b []byte) error { - // From json.Unmarshaler: By convention, to approximate the behavior of - // Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as - // a no-op. - if string(b) == "null" { - return nil - } - if c == nil { - return fmt.Errorf("nil receiver passed to UnmarshalJSON") - } - - var x interface{} - if err := json.Unmarshal(b, &x); err != nil { - return err - } - switch x.(type) { - case string: - if jc, ok := strToCode[string(b)]; ok { - *c = jc - return nil - } - return fmt.Errorf("invalid code: %q", string(b)) - case float64: - if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil { - if ci >= maxCode { - return fmt.Errorf("invalid code: %q", ci) - } - - *c = Code(ci) - return nil - } - return fmt.Errorf("invalid code: %q", string(b)) - default: - return fmt.Errorf("invalid code: %q", string(b)) - } -} - -// MarshalJSON returns c as the JSON encoding of c. 
-func (c *Code) MarshalJSON() ([]byte, error) { - if c == nil { - return []byte("null"), nil - } - str, ok := codeToStr[*c] - if !ok { - return nil, fmt.Errorf("invalid code: %d", *c) - } - return []byte(fmt.Sprintf("%q", str)), nil -} diff --git a/vendor/go.opentelemetry.io/otel/codes/doc.go b/vendor/go.opentelemetry.io/otel/codes/doc.go deleted file mode 100644 index df3e0f1b6..000000000 --- a/vendor/go.opentelemetry.io/otel/codes/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package codes defines the canonical error codes used by OpenTelemetry. - -It conforms to [the OpenTelemetry -specification](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#statuscanonicalcode). -*/ -package codes // import "go.opentelemetry.io/otel/codes" diff --git a/vendor/go.opentelemetry.io/otel/doc.go b/vendor/go.opentelemetry.io/otel/doc.go deleted file mode 100644 index daa36c89d..000000000 --- a/vendor/go.opentelemetry.io/otel/doc.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package otel provides global access to the OpenTelemetry API. The subpackages of -the otel package provide an implementation of the OpenTelemetry API. - -The provided API is used to instrument code and measure data about that code's -performance and operation. The measured data, by default, is not processed or -transmitted anywhere. An implementation of the OpenTelemetry SDK, like the -default SDK implementation (go.opentelemetry.io/otel/sdk), and associated -exporters are used to process and transport this data. - -To read the getting started guide, see https://opentelemetry.io/docs/go/getting-started/. - -To read more about tracing, see go.opentelemetry.io/otel/trace. - -To read more about metrics, see go.opentelemetry.io/otel/metric. - -To read more about propagation, see go.opentelemetry.io/otel/propagation and -go.opentelemetry.io/otel/baggage. 
-*/ -package otel // import "go.opentelemetry.io/otel" diff --git a/vendor/go.opentelemetry.io/otel/error_handler.go b/vendor/go.opentelemetry.io/otel/error_handler.go deleted file mode 100644 index 72fad8541..000000000 --- a/vendor/go.opentelemetry.io/otel/error_handler.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package otel // import "go.opentelemetry.io/otel" - -// ErrorHandler handles irremediable events. -type ErrorHandler interface { - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // Handle handles any error deemed irremediable by an OpenTelemetry - // component. - Handle(error) - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. -} - -// ErrorHandlerFunc is a convenience adapter to allow the use of a function -// as an ErrorHandler. -type ErrorHandlerFunc func(error) - -var _ ErrorHandler = ErrorHandlerFunc(nil) - -// Handle handles the irremediable error by calling the ErrorHandlerFunc itself. -func (f ErrorHandlerFunc) Handle(err error) { - f(err) -} diff --git a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh deleted file mode 100644 index 9a58fb1d3..000000000 --- a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/env bash - -# Copyright The OpenTelemetry Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -euo pipefail - -top_dir='.' -if [[ $# -gt 0 ]]; then - top_dir="${1}" -fi - -p=$(pwd) -mod_dirs=() - -# Note `mapfile` does not exist in older bash versions: -# https://stackoverflow.com/questions/41475261/need-alternative-to-readarray-mapfile-for-script-on-older-version-of-bash - -while IFS= read -r line; do - mod_dirs+=("$line") -done < <(find "${top_dir}" -type f -name 'go.mod' -exec dirname {} \; | sort) - -for mod_dir in "${mod_dirs[@]}"; do - cd "${mod_dir}" - - while IFS= read -r line; do - echo ".${line#${p}}" - done < <(go list --find -f '{{.Name}}|{{.Dir}}' ./... 
| grep '^main|' | cut -f 2- -d '|') - cd "${p}" -done diff --git a/vendor/go.opentelemetry.io/otel/handler.go b/vendor/go.opentelemetry.io/otel/handler.go deleted file mode 100644 index ecd363ab5..000000000 --- a/vendor/go.opentelemetry.io/otel/handler.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package otel // import "go.opentelemetry.io/otel" - -import ( - "log" - "os" - "sync/atomic" - "unsafe" -) - -var ( - // globalErrorHandler provides an ErrorHandler that can be used - // throughout an OpenTelemetry instrumented project. When a user - // specified ErrorHandler is registered (`SetErrorHandler`) all calls to - // `Handle` and will be delegated to the registered ErrorHandler. - globalErrorHandler = defaultErrorHandler() - - // Compile-time check that delegator implements ErrorHandler. - _ ErrorHandler = (*delegator)(nil) - // Compile-time check that errLogger implements ErrorHandler. - _ ErrorHandler = (*errLogger)(nil) -) - -type delegator struct { - delegate unsafe.Pointer -} - -func (d *delegator) Handle(err error) { - d.getDelegate().Handle(err) -} - -func (d *delegator) getDelegate() ErrorHandler { - return *(*ErrorHandler)(atomic.LoadPointer(&d.delegate)) -} - -// setDelegate sets the ErrorHandler delegate. -func (d *delegator) setDelegate(eh ErrorHandler) { - atomic.StorePointer(&d.delegate, unsafe.Pointer(&eh)) -} - -func defaultErrorHandler() *delegator { - d := &delegator{} - d.setDelegate(&errLogger{l: log.New(os.Stderr, "", log.LstdFlags)}) - return d -} - -// errLogger logs errors if no delegate is set, otherwise they are delegated. -type errLogger struct { - l *log.Logger -} - -// Handle logs err if no delegate is set, otherwise it is delegated. -func (h *errLogger) Handle(err error) { - h.l.Print(err) -} - -// GetErrorHandler returns the global ErrorHandler instance. -// -// The default ErrorHandler instance returned will log all errors to STDERR -// until an override ErrorHandler is set with SetErrorHandler. All -// ErrorHandler returned prior to this will automatically forward errors to -// the set instance instead of logging. -// -// Subsequent calls to SetErrorHandler after the first will not forward errors -// to the new ErrorHandler for prior returned instances. -func GetErrorHandler() ErrorHandler { - return globalErrorHandler -} - -// SetErrorHandler sets the global ErrorHandler to h. -// -// The first time this is called all ErrorHandler previously returned from -// GetErrorHandler will send errors to h instead of the default logging -// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not -// delegate errors to h. -func SetErrorHandler(h ErrorHandler) { - globalErrorHandler.setDelegate(h) -} - -// Handle is a convenience function for ErrorHandler().Handle(err). 
-func Handle(err error) { - GetErrorHandler().Handle(err) -} diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go deleted file mode 100644 index 622c3ee3f..000000000 --- a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package attribute provide several helper functions for some commonly used -logic of processing attributes. -*/ -package attribute // import "go.opentelemetry.io/otel/internal/attribute" - -import ( - "reflect" -) - -// BoolSliceValue converts a bool slice into an array with same elements as slice. -func BoolSliceValue(v []bool) interface{} { - var zero bool - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) - copy(cp.Elem().Slice(0, len(v)).Interface().([]bool), v) - return cp.Elem().Interface() -} - -// Int64SliceValue converts an int64 slice into an array with same elements as slice. -func Int64SliceValue(v []int64) interface{} { - var zero int64 - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) - copy(cp.Elem().Slice(0, len(v)).Interface().([]int64), v) - return cp.Elem().Interface() -} - -// Float64SliceValue converts a float64 slice into an array with same elements as slice. -func Float64SliceValue(v []float64) interface{} { - var zero float64 - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) - copy(cp.Elem().Slice(0, len(v)).Interface().([]float64), v) - return cp.Elem().Interface() -} - -// StringSliceValue converts a string slice into an array with same elements as slice. -func StringSliceValue(v []string) interface{} { - var zero string - cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) - copy(cp.Elem().Slice(0, len(v)).Interface().([]string), v) - return cp.Elem().Interface() -} - -// AsBoolSlice converts a bool array into a slice into with same elements as array. -func AsBoolSlice(v interface{}) []bool { - rv := reflect.ValueOf(v) - if rv.Type().Kind() != reflect.Array { - return nil - } - var zero bool - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]bool) -} - -// AsInt64Slice converts an int64 array into a slice into with same elements as array. -func AsInt64Slice(v interface{}) []int64 { - rv := reflect.ValueOf(v) - if rv.Type().Kind() != reflect.Array { - return nil - } - var zero int64 - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]int64) -} - -// AsFloat64Slice converts a float64 array into a slice into with same elements as array. 
-func AsFloat64Slice(v interface{}) []float64 { - rv := reflect.ValueOf(v) - if rv.Type().Kind() != reflect.Array { - return nil - } - var zero float64 - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]float64) -} - -// AsStringSlice converts a string array into a slice into with same elements as array. -func AsStringSlice(v interface{}) []string { - rv := reflect.ValueOf(v) - if rv.Type().Kind() != reflect.Array { - return nil - } - var zero string - correctLen := rv.Len() - correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) - cpy := reflect.New(correctType) - _ = reflect.Copy(cpy.Elem(), rv) - return cpy.Elem().Slice(0, correctLen).Interface().([]string) -} diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go deleted file mode 100644 index b96e5408e..000000000 --- a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package baggage provides base types and functionality to store and retrieve -baggage in Go context. This package exists because the OpenTracing bridge to -OpenTelemetry needs to synchronize state whenever baggage for a context is -modified and that context contains an OpenTracing span. If it were not for -this need this package would not need to exist and the -`go.opentelemetry.io/otel/baggage` package would be the singular place where -W3C baggage is handled. -*/ -package baggage // import "go.opentelemetry.io/otel/internal/baggage" - -// List is the collection of baggage members. The W3C allows for duplicates, -// but OpenTelemetry does not, therefore, this is represented as a map. -type List map[string]Item - -// Item is the value and metadata properties part of a list-member. -type Item struct { - Value string - Properties []Property -} - -// Property is a metadata entry for a list-member. -type Property struct { - Key, Value string - - // HasValue indicates if a zero-value value means the property does not - // have a value or if it was the zero-value. - HasValue bool -} diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go deleted file mode 100644 index 4469700d9..000000000 --- a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package baggage // import "go.opentelemetry.io/otel/internal/baggage" - -import "context" - -type baggageContextKeyType int - -const baggageKey baggageContextKeyType = iota - -// SetHookFunc is a callback called when storing baggage in the context. -type SetHookFunc func(context.Context, List) context.Context - -// GetHookFunc is a callback called when getting baggage from the context. -type GetHookFunc func(context.Context, List) List - -type baggageState struct { - list List - - setHook SetHookFunc - getHook GetHookFunc -} - -// ContextWithSetHook returns a copy of parent with hook configured to be -// invoked every time ContextWithBaggage is called. -// -// Passing nil SetHookFunc creates a context with no set hook to call. -func ContextWithSetHook(parent context.Context, hook SetHookFunc) context.Context { - var s baggageState - if v, ok := parent.Value(baggageKey).(baggageState); ok { - s = v - } - - s.setHook = hook - return context.WithValue(parent, baggageKey, s) -} - -// ContextWithGetHook returns a copy of parent with hook configured to be -// invoked every time FromContext is called. -// -// Passing nil GetHookFunc creates a context with no get hook to call. -func ContextWithGetHook(parent context.Context, hook GetHookFunc) context.Context { - var s baggageState - if v, ok := parent.Value(baggageKey).(baggageState); ok { - s = v - } - - s.getHook = hook - return context.WithValue(parent, baggageKey, s) -} - -// ContextWithList returns a copy of parent with baggage. Passing nil list -// returns a context without any baggage. -func ContextWithList(parent context.Context, list List) context.Context { - var s baggageState - if v, ok := parent.Value(baggageKey).(baggageState); ok { - s = v - } - - s.list = list - ctx := context.WithValue(parent, baggageKey, s) - if s.setHook != nil { - ctx = s.setHook(ctx, list) - } - - return ctx -} - -// ListFromContext returns the baggage contained in ctx. -func ListFromContext(ctx context.Context) List { - switch v := ctx.Value(baggageKey).(type) { - case baggageState: - if v.getHook != nil { - return v.getHook(ctx, v.list) - } - return v.list - default: - return nil - } -} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go deleted file mode 100644 index 293c08961..000000000 --- a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package global // import "go.opentelemetry.io/otel/internal/global" - -import ( - "log" - "os" - "sync/atomic" - "unsafe" - - "github.com/go-logr/logr" - "github.com/go-logr/stdr" -) - -// globalLogger is the logging interface used within the otel api and sdk provide deatails of the internals. -// -// The default logger uses stdr which is backed by the standard `log.Logger` -// interface. This logger will only show messages at the Error Level. -var globalLogger unsafe.Pointer - -func init() { - SetLogger(stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))) -} - -// SetLogger overrides the globalLogger with l. -// -// To see Info messages use a logger with `l.V(1).Enabled() == true` -// To see Debug messages use a logger with `l.V(5).Enabled() == true`. -func SetLogger(l logr.Logger) { - atomic.StorePointer(&globalLogger, unsafe.Pointer(&l)) -} - -func getLogger() logr.Logger { - return *(*logr.Logger)(atomic.LoadPointer(&globalLogger)) -} - -// Info prints messages about the general state of the API or SDK. -// This should usually be less then 5 messages a minute. -func Info(msg string, keysAndValues ...interface{}) { - getLogger().V(1).Info(msg, keysAndValues...) -} - -// Error prints messages about exceptional states of the API or SDK. -func Error(err error, msg string, keysAndValues ...interface{}) { - getLogger().Error(err, msg, keysAndValues...) -} - -// Debug prints messages about all internal changes in the API or SDK. -func Debug(msg string, keysAndValues ...interface{}) { - getLogger().V(5).Info(msg, keysAndValues...) -} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/propagator.go b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go deleted file mode 100644 index 06bac35c2..000000000 --- a/vendor/go.opentelemetry.io/otel/internal/global/propagator.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package global // import "go.opentelemetry.io/otel/internal/global" - -import ( - "context" - "sync" - - "go.opentelemetry.io/otel/propagation" -) - -// textMapPropagator is a default TextMapPropagator that delegates calls to a -// registered delegate if one is set, otherwise it defaults to delegating the -// calls to a the default no-op propagation.TextMapPropagator. -type textMapPropagator struct { - mtx sync.Mutex - once sync.Once - delegate propagation.TextMapPropagator - noop propagation.TextMapPropagator -} - -// Compile-time guarantee that textMapPropagator implements the -// propagation.TextMapPropagator interface. -var _ propagation.TextMapPropagator = (*textMapPropagator)(nil) - -func newTextMapPropagator() *textMapPropagator { - return &textMapPropagator{ - noop: propagation.NewCompositeTextMapPropagator(), - } -} - -// SetDelegate sets a delegate propagation.TextMapPropagator that all calls are -// forwarded to. Delegation can only be performed once, all subsequent calls -// perform no delegation. 
-func (p *textMapPropagator) SetDelegate(delegate propagation.TextMapPropagator) { - if delegate == nil { - return - } - - p.mtx.Lock() - p.once.Do(func() { p.delegate = delegate }) - p.mtx.Unlock() -} - -// effectiveDelegate returns the current delegate of p if one is set, -// otherwise the default noop TextMapPropagator is returned. This method -// can be called concurrently. -func (p *textMapPropagator) effectiveDelegate() propagation.TextMapPropagator { - p.mtx.Lock() - defer p.mtx.Unlock() - if p.delegate != nil { - return p.delegate - } - return p.noop -} - -// Inject set cross-cutting concerns from the Context into the carrier. -func (p *textMapPropagator) Inject(ctx context.Context, carrier propagation.TextMapCarrier) { - p.effectiveDelegate().Inject(ctx, carrier) -} - -// Extract reads cross-cutting concerns from the carrier into a Context. -func (p *textMapPropagator) Extract(ctx context.Context, carrier propagation.TextMapCarrier) context.Context { - return p.effectiveDelegate().Extract(ctx, carrier) -} - -// Fields returns the keys whose values are set with Inject. -func (p *textMapPropagator) Fields() []string { - return p.effectiveDelegate().Fields() -} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/state.go b/vendor/go.opentelemetry.io/otel/internal/global/state.go deleted file mode 100644 index 1ad38f828..000000000 --- a/vendor/go.opentelemetry.io/otel/internal/global/state.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package global // import "go.opentelemetry.io/otel/internal/global" - -import ( - "errors" - "sync" - "sync/atomic" - - "go.opentelemetry.io/otel/propagation" - "go.opentelemetry.io/otel/trace" -) - -type ( - tracerProviderHolder struct { - tp trace.TracerProvider - } - - propagatorsHolder struct { - tm propagation.TextMapPropagator - } -) - -var ( - globalTracer = defaultTracerValue() - globalPropagators = defaultPropagatorsValue() - - delegateTraceOnce sync.Once - delegateTextMapPropagatorOnce sync.Once -) - -// TracerProvider is the internal implementation for global.TracerProvider. -func TracerProvider() trace.TracerProvider { - return globalTracer.Load().(tracerProviderHolder).tp -} - -// SetTracerProvider is the internal implementation for global.SetTracerProvider. -func SetTracerProvider(tp trace.TracerProvider) { - current := TracerProvider() - - if _, cOk := current.(*tracerProvider); cOk { - if _, tpOk := tp.(*tracerProvider); tpOk && current == tp { - // Do not assign the default delegating TracerProvider to delegate - // to itself. - Error( - errors.New("no delegate configured in tracer provider"), - "Setting tracer provider to it's current value. No delegate will be configured", - ) - return - } - } - - delegateTraceOnce.Do(func() { - if def, ok := current.(*tracerProvider); ok { - def.setDelegate(tp) - } - }) - globalTracer.Store(tracerProviderHolder{tp: tp}) -} - -// TextMapPropagator is the internal implementation for global.TextMapPropagator. 
-func TextMapPropagator() propagation.TextMapPropagator { - return globalPropagators.Load().(propagatorsHolder).tm -} - -// SetTextMapPropagator is the internal implementation for global.SetTextMapPropagator. -func SetTextMapPropagator(p propagation.TextMapPropagator) { - current := TextMapPropagator() - - if _, cOk := current.(*textMapPropagator); cOk { - if _, pOk := p.(*textMapPropagator); pOk && current == p { - // Do not assign the default delegating TextMapPropagator to - // delegate to itself. - Error( - errors.New("no delegate configured in text map propagator"), - "Setting text map propagator to it's current value. No delegate will be configured", - ) - return - } - } - - // For the textMapPropagator already returned by TextMapPropagator - // delegate to p. - delegateTextMapPropagatorOnce.Do(func() { - if def, ok := current.(*textMapPropagator); ok { - def.SetDelegate(p) - } - }) - // Return p when subsequent calls to TextMapPropagator are made. - globalPropagators.Store(propagatorsHolder{tm: p}) -} - -func defaultTracerValue() *atomic.Value { - v := &atomic.Value{} - v.Store(tracerProviderHolder{tp: &tracerProvider{}}) - return v -} - -func defaultPropagatorsValue() *atomic.Value { - v := &atomic.Value{} - v.Store(propagatorsHolder{tm: newTextMapPropagator()}) - return v -} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go deleted file mode 100644 index 5f008d098..000000000 --- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package global // import "go.opentelemetry.io/otel/internal/global" - -/* -This file contains the forwarding implementation of the TracerProvider used as -the default global instance. Prior to initialization of an SDK, Tracers -returned by the global TracerProvider will provide no-op functionality. This -means that all Span created prior to initialization are no-op Spans. - -Once an SDK has been initialized, all provided no-op Tracers are swapped for -Tracers provided by the SDK defined TracerProvider. However, any Span started -prior to this initialization does not change its behavior. Meaning, the Span -remains a no-op Span. - -The implementation to track and swap Tracers locks all new Tracer creation -until the swap is complete. This assumes that this operation is not -performance-critical. If that assumption is incorrect, be sure to configure an -SDK prior to any Tracer creation. -*/ - -import ( - "context" - "sync" - "sync/atomic" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/trace" -) - -// tracerProvider is a placeholder for a configured SDK TracerProvider. -// -// All TracerProvider functionality is forwarded to a delegate once -// configured. 
-type tracerProvider struct { - mtx sync.Mutex - tracers map[il]*tracer - delegate trace.TracerProvider -} - -// Compile-time guarantee that tracerProvider implements the TracerProvider -// interface. -var _ trace.TracerProvider = &tracerProvider{} - -// setDelegate configures p to delegate all TracerProvider functionality to -// provider. -// -// All Tracers provided prior to this function call are switched out to be -// Tracers provided by provider. -// -// It is guaranteed by the caller that this happens only once. -func (p *tracerProvider) setDelegate(provider trace.TracerProvider) { - p.mtx.Lock() - defer p.mtx.Unlock() - - p.delegate = provider - - if len(p.tracers) == 0 { - return - } - - for _, t := range p.tracers { - t.setDelegate(provider) - } - - p.tracers = nil -} - -// Tracer implements TracerProvider. -func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer { - p.mtx.Lock() - defer p.mtx.Unlock() - - if p.delegate != nil { - return p.delegate.Tracer(name, opts...) - } - - // At this moment it is guaranteed that no sdk is installed, save the tracer in the tracers map. - - c := trace.NewTracerConfig(opts...) - key := il{ - name: name, - version: c.InstrumentationVersion(), - } - - if p.tracers == nil { - p.tracers = make(map[il]*tracer) - } - - if val, ok := p.tracers[key]; ok { - return val - } - - t := &tracer{name: name, opts: opts, provider: p} - p.tracers[key] = t - return t -} - -type il struct { - name string - version string -} - -// tracer is a placeholder for a trace.Tracer. -// -// All Tracer functionality is forwarded to a delegate once configured. -// Otherwise, all functionality is forwarded to a NoopTracer. -type tracer struct { - name string - opts []trace.TracerOption - provider *tracerProvider - - delegate atomic.Value -} - -// Compile-time guarantee that tracer implements the trace.Tracer interface. -var _ trace.Tracer = &tracer{} - -// setDelegate configures t to delegate all Tracer functionality to Tracers -// created by provider. -// -// All subsequent calls to the Tracer methods will be passed to the delegate. -// -// It is guaranteed by the caller that this happens only once. -func (t *tracer) setDelegate(provider trace.TracerProvider) { - t.delegate.Store(provider.Tracer(t.name, t.opts...)) -} - -// Start implements trace.Tracer by forwarding the call to t.delegate if -// set, otherwise it forwards the call to a NoopTracer. -func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { - delegate := t.delegate.Load() - if delegate != nil { - return delegate.(trace.Tracer).Start(ctx, name, opts...) - } - - s := nonRecordingSpan{sc: trace.SpanContextFromContext(ctx), tracer: t} - ctx = trace.ContextWithSpan(ctx, s) - return ctx, s -} - -// nonRecordingSpan is a minimal implementation of a Span that wraps a -// SpanContext. It performs no operations other than to return the wrapped -// SpanContext. -type nonRecordingSpan struct { - sc trace.SpanContext - tracer *tracer -} - -var _ trace.Span = nonRecordingSpan{} - -// SpanContext returns the wrapped SpanContext. -func (s nonRecordingSpan) SpanContext() trace.SpanContext { return s.sc } - -// IsRecording always returns false. -func (nonRecordingSpan) IsRecording() bool { return false } - -// SetStatus does nothing. -func (nonRecordingSpan) SetStatus(codes.Code, string) {} - -// SetError does nothing. -func (nonRecordingSpan) SetError(bool) {} - -// SetAttributes does nothing. 
-func (nonRecordingSpan) SetAttributes(...attribute.KeyValue) {} - -// End does nothing. -func (nonRecordingSpan) End(...trace.SpanEndOption) {} - -// RecordError does nothing. -func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {} - -// AddEvent does nothing. -func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {} - -// SetName does nothing. -func (nonRecordingSpan) SetName(string) {} - -func (s nonRecordingSpan) TracerProvider() trace.TracerProvider { return s.tracer.provider } diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go deleted file mode 100644 index e07e79400..000000000 --- a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal // import "go.opentelemetry.io/otel/internal" - -import ( - "math" - "unsafe" -) - -func BoolToRaw(b bool) uint64 { // nolint:revive // b is not a control flag. - if b { - return 1 - } - return 0 -} - -func RawToBool(r uint64) bool { - return r != 0 -} - -func Int64ToRaw(i int64) uint64 { - return uint64(i) -} - -func RawToInt64(r uint64) int64 { - return int64(r) -} - -func Float64ToRaw(f float64) uint64 { - return math.Float64bits(f) -} - -func RawToFloat64(r uint64) float64 { - return math.Float64frombits(r) -} - -func RawPtrToFloat64Ptr(r *uint64) *float64 { - return (*float64)(unsafe.Pointer(r)) -} - -func RawPtrToInt64Ptr(r *uint64) *int64 { - return (*int64)(unsafe.Pointer(r)) -} diff --git a/vendor/go.opentelemetry.io/otel/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal_logging.go deleted file mode 100644 index c4f8acd5d..000000000 --- a/vendor/go.opentelemetry.io/otel/internal_logging.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package otel // import "go.opentelemetry.io/otel" - -import ( - "github.com/go-logr/logr" - - "go.opentelemetry.io/otel/internal/global" -) - -// SetLogger configures the logger used internally to opentelemetry. 
-func SetLogger(logger logr.Logger) { - global.SetLogger(logger) -} diff --git a/vendor/go.opentelemetry.io/otel/propagation.go b/vendor/go.opentelemetry.io/otel/propagation.go deleted file mode 100644 index d29aaa32c..000000000 --- a/vendor/go.opentelemetry.io/otel/propagation.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package otel // import "go.opentelemetry.io/otel" - -import ( - "go.opentelemetry.io/otel/internal/global" - "go.opentelemetry.io/otel/propagation" -) - -// GetTextMapPropagator returns the global TextMapPropagator. If none has been -// set, a No-Op TextMapPropagator is returned. -func GetTextMapPropagator() propagation.TextMapPropagator { - return global.TextMapPropagator() -} - -// SetTextMapPropagator sets propagator as the global TextMapPropagator. -func SetTextMapPropagator(propagator propagation.TextMapPropagator) { - global.SetTextMapPropagator(propagator) -} diff --git a/vendor/go.opentelemetry.io/otel/propagation/baggage.go b/vendor/go.opentelemetry.io/otel/propagation/baggage.go deleted file mode 100644 index 303cdf1cb..000000000 --- a/vendor/go.opentelemetry.io/otel/propagation/baggage.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package propagation // import "go.opentelemetry.io/otel/propagation" - -import ( - "context" - - "go.opentelemetry.io/otel/baggage" -) - -const baggageHeader = "baggage" - -// Baggage is a propagator that supports the W3C Baggage format. -// -// This propagates user-defined baggage associated with a trace. The complete -// specification is defined at https://www.w3.org/TR/baggage/. -type Baggage struct{} - -var _ TextMapPropagator = Baggage{} - -// Inject sets baggage key-values from ctx into the carrier. -func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) { - bStr := baggage.FromContext(ctx).String() - if bStr != "" { - carrier.Set(baggageHeader, bStr) - } -} - -// Extract returns a copy of parent with the baggage from the carrier added. -func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context { - bStr := carrier.Get(baggageHeader) - if bStr == "" { - return parent - } - - bag, err := baggage.Parse(bStr) - if err != nil { - return parent - } - return baggage.ContextWithBaggage(parent, bag) -} - -// Fields returns the keys who's values are set with Inject. 
-func (b Baggage) Fields() []string { - return []string{baggageHeader} -} diff --git a/vendor/go.opentelemetry.io/otel/propagation/doc.go b/vendor/go.opentelemetry.io/otel/propagation/doc.go deleted file mode 100644 index c119eb285..000000000 --- a/vendor/go.opentelemetry.io/otel/propagation/doc.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package propagation contains OpenTelemetry context propagators. - -OpenTelemetry propagators are used to extract and inject context data from and -into messages exchanged by applications. The propagator supported by this -package is the W3C Trace Context encoding -(https://www.w3.org/TR/trace-context/), and W3C Baggage -(https://www.w3.org/TR/baggage/). -*/ -package propagation // import "go.opentelemetry.io/otel/propagation" diff --git a/vendor/go.opentelemetry.io/otel/propagation/propagation.go b/vendor/go.opentelemetry.io/otel/propagation/propagation.go deleted file mode 100644 index c94438f73..000000000 --- a/vendor/go.opentelemetry.io/otel/propagation/propagation.go +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package propagation // import "go.opentelemetry.io/otel/propagation" - -import ( - "context" - "net/http" -) - -// TextMapCarrier is the storage medium used by a TextMapPropagator. -type TextMapCarrier interface { - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // Get returns the value associated with the passed key. - Get(key string) string - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // Set stores the key-value pair. - Set(key string, value string) - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // Keys lists the keys stored in this carrier. - Keys() []string - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. -} - -// MapCarrier is a TextMapCarrier that uses a map held in memory as a storage -// medium for propagated key-value pairs. -type MapCarrier map[string]string - -// Compile time check that MapCarrier implements the TextMapCarrier. -var _ TextMapCarrier = MapCarrier{} - -// Get returns the value associated with the passed key. 
-func (c MapCarrier) Get(key string) string { - return c[key] -} - -// Set stores the key-value pair. -func (c MapCarrier) Set(key, value string) { - c[key] = value -} - -// Keys lists the keys stored in this carrier. -func (c MapCarrier) Keys() []string { - keys := make([]string, 0, len(c)) - for k := range c { - keys = append(keys, k) - } - return keys -} - -// HeaderCarrier adapts http.Header to satisfy the TextMapCarrier interface. -type HeaderCarrier http.Header - -// Get returns the value associated with the passed key. -func (hc HeaderCarrier) Get(key string) string { - return http.Header(hc).Get(key) -} - -// Set stores the key-value pair. -func (hc HeaderCarrier) Set(key string, value string) { - http.Header(hc).Set(key, value) -} - -// Keys lists the keys stored in this carrier. -func (hc HeaderCarrier) Keys() []string { - keys := make([]string, 0, len(hc)) - for k := range hc { - keys = append(keys, k) - } - return keys -} - -// TextMapPropagator propagates cross-cutting concerns as key-value text -// pairs within a carrier that travels in-band across process boundaries. -type TextMapPropagator interface { - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // Inject set cross-cutting concerns from the Context into the carrier. - Inject(ctx context.Context, carrier TextMapCarrier) - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // Extract reads cross-cutting concerns from the carrier into a Context. - Extract(ctx context.Context, carrier TextMapCarrier) context.Context - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. - - // Fields returns the keys whose values are set with Inject. - Fields() []string - // DO NOT CHANGE: any modification will not be backwards compatible and - // must never be done outside of a new major release. -} - -type compositeTextMapPropagator []TextMapPropagator - -func (p compositeTextMapPropagator) Inject(ctx context.Context, carrier TextMapCarrier) { - for _, i := range p { - i.Inject(ctx, carrier) - } -} - -func (p compositeTextMapPropagator) Extract(ctx context.Context, carrier TextMapCarrier) context.Context { - for _, i := range p { - ctx = i.Extract(ctx, carrier) - } - return ctx -} - -func (p compositeTextMapPropagator) Fields() []string { - unique := make(map[string]struct{}) - for _, i := range p { - for _, k := range i.Fields() { - unique[k] = struct{}{} - } - } - - fields := make([]string, 0, len(unique)) - for k := range unique { - fields = append(fields, k) - } - return fields -} - -// NewCompositeTextMapPropagator returns a unified TextMapPropagator from the -// group of passed TextMapPropagator. This allows different cross-cutting -// concerns to be propagates in a unified manner. -// -// The returned TextMapPropagator will inject and extract cross-cutting -// concerns in the order the TextMapPropagators were provided. Additionally, -// the Fields method will return a de-duplicated slice of the keys that are -// set with the Inject method. 
-func NewCompositeTextMapPropagator(p ...TextMapPropagator) TextMapPropagator { - return compositeTextMapPropagator(p) -} diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go deleted file mode 100644 index 902692da0..000000000 --- a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package propagation // import "go.opentelemetry.io/otel/propagation" - -import ( - "context" - "encoding/hex" - "fmt" - "regexp" - - "go.opentelemetry.io/otel/trace" -) - -const ( - supportedVersion = 0 - maxVersion = 254 - traceparentHeader = "traceparent" - tracestateHeader = "tracestate" -) - -// TraceContext is a propagator that supports the W3C Trace Context format -// (https://www.w3.org/TR/trace-context/) -// -// This propagator will propagate the traceparent and tracestate headers to -// guarantee traces are not broken. It is up to the users of this propagator -// to choose if they want to participate in a trace by modifying the -// traceparent header and relevant parts of the tracestate header containing -// their proprietary information. -type TraceContext struct{} -
-var _ TextMapPropagator = TraceContext{} -var traceCtxRegExp = regexp.MustCompile("^(?P<version>[0-9a-f]{2})-(?P<traceID>[a-f0-9]{32})-(?P<spanID>[a-f0-9]{16})-(?P<traceFlags>[a-f0-9]{2})(?:-.*)?$") - -// Inject set tracecontext from the Context into the carrier. -func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) { - sc := trace.SpanContextFromContext(ctx) - if !sc.IsValid() { - return - } - - if ts := sc.TraceState().String(); ts != "" { - carrier.Set(tracestateHeader, ts) - } - - // Clear all flags other than the trace-context supported sampling bit. - flags := sc.TraceFlags() & trace.FlagsSampled - - h := fmt.Sprintf("%.2x-%s-%s-%s", - supportedVersion, - sc.TraceID(), - sc.SpanID(), - flags) - carrier.Set(traceparentHeader, h) -} - -// Extract reads tracecontext from the carrier into a returned Context. -// -// The returned Context will be a copy of ctx and contain the extracted -// tracecontext as the remote SpanContext. If the extracted tracecontext is -// invalid, the passed ctx will be returned directly instead.
-func (tc TraceContext) Extract(ctx context.Context, carrier TextMapCarrier) context.Context { - sc := tc.extract(carrier) - if !sc.IsValid() { - return ctx - } - return trace.ContextWithRemoteSpanContext(ctx, sc) -} - -func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext { - h := carrier.Get(traceparentHeader) - if h == "" { - return trace.SpanContext{} - } - - matches := traceCtxRegExp.FindStringSubmatch(h) - - if len(matches) == 0 { - return trace.SpanContext{} - } - - if len(matches) < 5 { // four subgroups plus the overall match - return trace.SpanContext{} - } - - if len(matches[1]) != 2 { - return trace.SpanContext{} - } - ver, err := hex.DecodeString(matches[1]) - if err != nil { - return trace.SpanContext{} - } - version := int(ver[0]) - if version > maxVersion { - return trace.SpanContext{} - } - - if version == 0 && len(matches) != 5 { // four subgroups plus the overall match - return trace.SpanContext{} - } - - if len(matches[2]) != 32 { - return trace.SpanContext{} - } - - var scc trace.SpanContextConfig - - scc.TraceID, err = trace.TraceIDFromHex(matches[2][:32]) - if err != nil { - return trace.SpanContext{} - } - - if len(matches[3]) != 16 { - return trace.SpanContext{} - } - scc.SpanID, err = trace.SpanIDFromHex(matches[3]) - if err != nil { - return trace.SpanContext{} - } - - if len(matches[4]) != 2 { - return trace.SpanContext{} - } - opts, err := hex.DecodeString(matches[4]) - if err != nil || len(opts) < 1 || (version == 0 && opts[0] > 2) { - return trace.SpanContext{} - } - // Clear all flags other than the trace-context supported sampling bit. - scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled - - // Ignore the error returned here. Failure to parse tracestate MUST NOT - // affect the parsing of traceparent according to the W3C tracecontext - // specification. - scc.TraceState, _ = trace.ParseTraceState(carrier.Get(tracestateHeader)) - scc.Remote = true - - sc := trace.NewSpanContext(scc) - if !sc.IsValid() { - return trace.SpanContext{} - } - - return sc -} - -// Fields returns the keys who's values are set with Inject. -func (tc TraceContext) Fields() []string { - return []string{traceparentHeader, tracestateHeader} -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/internal/http.go b/vendor/go.opentelemetry.io/otel/semconv/internal/http.go deleted file mode 100644 index b580eedef..000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/internal/http.go +++ /dev/null @@ -1,336 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal // import "go.opentelemetry.io/otel/semconv/internal" - -import ( - "fmt" - "net" - "net/http" - "strconv" - "strings" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/trace" -) - -// SemanticConventions are the semantic convention values defined for a -// version of the OpenTelemetry specification. 
-type SemanticConventions struct { - EnduserIDKey attribute.Key - HTTPClientIPKey attribute.Key - HTTPFlavorKey attribute.Key - HTTPHostKey attribute.Key - HTTPMethodKey attribute.Key - HTTPRequestContentLengthKey attribute.Key - HTTPRouteKey attribute.Key - HTTPSchemeHTTP attribute.KeyValue - HTTPSchemeHTTPS attribute.KeyValue - HTTPServerNameKey attribute.Key - HTTPStatusCodeKey attribute.Key - HTTPTargetKey attribute.Key - HTTPURLKey attribute.Key - HTTPUserAgentKey attribute.Key - NetHostIPKey attribute.Key - NetHostNameKey attribute.Key - NetHostPortKey attribute.Key - NetPeerIPKey attribute.Key - NetPeerNameKey attribute.Key - NetPeerPortKey attribute.Key - NetTransportIP attribute.KeyValue - NetTransportOther attribute.KeyValue - NetTransportTCP attribute.KeyValue - NetTransportUDP attribute.KeyValue - NetTransportUnix attribute.KeyValue -} - -// NetAttributesFromHTTPRequest generates attributes of the net -// namespace as specified by the OpenTelemetry specification for a -// span. The network parameter is a string that net.Dial function -// from standard library can understand. -func (sc *SemanticConventions) NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue { - attrs := []attribute.KeyValue{} - - switch network { - case "tcp", "tcp4", "tcp6": - attrs = append(attrs, sc.NetTransportTCP) - case "udp", "udp4", "udp6": - attrs = append(attrs, sc.NetTransportUDP) - case "ip", "ip4", "ip6": - attrs = append(attrs, sc.NetTransportIP) - case "unix", "unixgram", "unixpacket": - attrs = append(attrs, sc.NetTransportUnix) - default: - attrs = append(attrs, sc.NetTransportOther) - } - - peerIP, peerName, peerPort := hostIPNamePort(request.RemoteAddr) - if peerIP != "" { - attrs = append(attrs, sc.NetPeerIPKey.String(peerIP)) - } - if peerName != "" { - attrs = append(attrs, sc.NetPeerNameKey.String(peerName)) - } - if peerPort != 0 { - attrs = append(attrs, sc.NetPeerPortKey.Int(peerPort)) - } - - hostIP, hostName, hostPort := "", "", 0 - for _, someHost := range []string{request.Host, request.Header.Get("Host"), request.URL.Host} { - hostIP, hostName, hostPort = hostIPNamePort(someHost) - if hostIP != "" || hostName != "" || hostPort != 0 { - break - } - } - if hostIP != "" { - attrs = append(attrs, sc.NetHostIPKey.String(hostIP)) - } - if hostName != "" { - attrs = append(attrs, sc.NetHostNameKey.String(hostName)) - } - if hostPort != 0 { - attrs = append(attrs, sc.NetHostPortKey.Int(hostPort)) - } - - return attrs -} - -// hostIPNamePort extracts the IP address, name and (optional) port from hostWithPort. -// It handles both IPv4 and IPv6 addresses. If the host portion is not recognized -// as a valid IPv4 or IPv6 address, the `ip` result will be empty and the -// host portion will instead be returned in `name`. -func hostIPNamePort(hostWithPort string) (ip string, name string, port int) { - var ( - hostPart, portPart string - parsedPort uint64 - err error - ) - if hostPart, portPart, err = net.SplitHostPort(hostWithPort); err != nil { - hostPart, portPart = hostWithPort, "" - } - if parsedIP := net.ParseIP(hostPart); parsedIP != nil { - ip = parsedIP.String() - } else { - name = hostPart - } - if parsedPort, err = strconv.ParseUint(portPart, 10, 16); err == nil { - port = int(parsedPort) - } - return -} - -// EndUserAttributesFromHTTPRequest generates attributes of the -// enduser namespace as specified by the OpenTelemetry specification -// for a span. 
-func (sc *SemanticConventions) EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { - if username, _, ok := request.BasicAuth(); ok { - return []attribute.KeyValue{sc.EnduserIDKey.String(username)} - } - return nil -} - -// HTTPClientAttributesFromHTTPRequest generates attributes of the -// http namespace as specified by the OpenTelemetry specification for -// a span on the client side. -func (sc *SemanticConventions) HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { - attrs := []attribute.KeyValue{} - - // remove any username/password info that may be in the URL - // before adding it to the attributes - userinfo := request.URL.User - request.URL.User = nil - - attrs = append(attrs, sc.HTTPURLKey.String(request.URL.String())) - - // restore any username/password info that was removed - request.URL.User = userinfo - - return append(attrs, sc.httpCommonAttributesFromHTTPRequest(request)...) -} - -func (sc *SemanticConventions) httpCommonAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { - attrs := []attribute.KeyValue{} - if ua := request.UserAgent(); ua != "" { - attrs = append(attrs, sc.HTTPUserAgentKey.String(ua)) - } - if request.ContentLength > 0 { - attrs = append(attrs, sc.HTTPRequestContentLengthKey.Int64(request.ContentLength)) - } - - return append(attrs, sc.httpBasicAttributesFromHTTPRequest(request)...) -} - -func (sc *SemanticConventions) httpBasicAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { - // as these attributes are used by HTTPServerMetricAttributesFromHTTPRequest, they should be low-cardinality - attrs := []attribute.KeyValue{} - - if request.TLS != nil { - attrs = append(attrs, sc.HTTPSchemeHTTPS) - } else { - attrs = append(attrs, sc.HTTPSchemeHTTP) - } - - if request.Host != "" { - attrs = append(attrs, sc.HTTPHostKey.String(request.Host)) - } else if request.URL != nil && request.URL.Host != "" { - attrs = append(attrs, sc.HTTPHostKey.String(request.URL.Host)) - } - - flavor := "" - if request.ProtoMajor == 1 { - flavor = fmt.Sprintf("1.%d", request.ProtoMinor) - } else if request.ProtoMajor == 2 { - flavor = "2" - } - if flavor != "" { - attrs = append(attrs, sc.HTTPFlavorKey.String(flavor)) - } - - if request.Method != "" { - attrs = append(attrs, sc.HTTPMethodKey.String(request.Method)) - } else { - attrs = append(attrs, sc.HTTPMethodKey.String(http.MethodGet)) - } - - return attrs -} - -// HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes -// to be used with server-side HTTP metrics. -func (sc *SemanticConventions) HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue { - attrs := []attribute.KeyValue{} - if serverName != "" { - attrs = append(attrs, sc.HTTPServerNameKey.String(serverName)) - } - return append(attrs, sc.httpBasicAttributesFromHTTPRequest(request)...) -} - -// HTTPServerAttributesFromHTTPRequest generates attributes of the -// http namespace as specified by the OpenTelemetry specification for -// a span on the server side. Currently, only basic authentication is -// supported. 
-func (sc *SemanticConventions) HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue { - attrs := []attribute.KeyValue{ - sc.HTTPTargetKey.String(request.RequestURI), - } - - if serverName != "" { - attrs = append(attrs, sc.HTTPServerNameKey.String(serverName)) - } - if route != "" { - attrs = append(attrs, sc.HTTPRouteKey.String(route)) - } - if values, ok := request.Header["X-Forwarded-For"]; ok && len(values) > 0 { - if addresses := strings.SplitN(values[0], ",", 2); len(addresses) > 0 { - attrs = append(attrs, sc.HTTPClientIPKey.String(addresses[0])) - } - } - - return append(attrs, sc.httpCommonAttributesFromHTTPRequest(request)...) -} - -// HTTPAttributesFromHTTPStatusCode generates attributes of the http -// namespace as specified by the OpenTelemetry specification for a -// span. -func (sc *SemanticConventions) HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue { - attrs := []attribute.KeyValue{ - sc.HTTPStatusCodeKey.Int(code), - } - return attrs -} - -type codeRange struct { - fromInclusive int - toInclusive int -} - -func (r codeRange) contains(code int) bool { - return r.fromInclusive <= code && code <= r.toInclusive -} - -var validRangesPerCategory = map[int][]codeRange{ - 1: { - {http.StatusContinue, http.StatusEarlyHints}, - }, - 2: { - {http.StatusOK, http.StatusAlreadyReported}, - {http.StatusIMUsed, http.StatusIMUsed}, - }, - 3: { - {http.StatusMultipleChoices, http.StatusUseProxy}, - {http.StatusTemporaryRedirect, http.StatusPermanentRedirect}, - }, - 4: { - {http.StatusBadRequest, http.StatusTeapot}, // yes, teapot is so useful… - {http.StatusMisdirectedRequest, http.StatusUpgradeRequired}, - {http.StatusPreconditionRequired, http.StatusTooManyRequests}, - {http.StatusRequestHeaderFieldsTooLarge, http.StatusRequestHeaderFieldsTooLarge}, - {http.StatusUnavailableForLegalReasons, http.StatusUnavailableForLegalReasons}, - }, - 5: { - {http.StatusInternalServerError, http.StatusLoopDetected}, - {http.StatusNotExtended, http.StatusNetworkAuthenticationRequired}, - }, -} - -// SpanStatusFromHTTPStatusCode generates a status code and a message -// as specified by the OpenTelemetry specification for a span. -func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) { - spanCode, valid := validateHTTPStatusCode(code) - if !valid { - return spanCode, fmt.Sprintf("Invalid HTTP status code %d", code) - } - return spanCode, "" -} - -// SpanStatusFromHTTPStatusCodeAndSpanKind generates a status code and a message -// as specified by the OpenTelemetry specification for a span. -// Exclude 4xx for SERVER to set the appropriate status. -func SpanStatusFromHTTPStatusCodeAndSpanKind(code int, spanKind trace.SpanKind) (codes.Code, string) { - spanCode, valid := validateHTTPStatusCode(code) - if !valid { - return spanCode, fmt.Sprintf("Invalid HTTP status code %d", code) - } - category := code / 100 - if spanKind == trace.SpanKindServer && category == 4 { - return codes.Unset, "" - } - return spanCode, "" -} - -// validateHTTPStatusCode validates the HTTP status code and returns -// corresponding span status code. If the `code` is not a valid HTTP status -// code, returns span status Error and false. 
-func validateHTTPStatusCode(code int) (codes.Code, bool) { - category := code / 100 - ranges, ok := validRangesPerCategory[category] - if !ok { - return codes.Error, false - } - ok = false - for _, crange := range ranges { - ok = crange.contains(code) - if ok { - break - } - } - if !ok { - return codes.Error, false - } - if category > 0 && category < 4 { - return codes.Unset, true - } - return codes.Error, true -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/doc.go deleted file mode 100644 index 181fcc9c5..000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package semconv implements OpenTelemetry semantic conventions. -// -// OpenTelemetry semantic conventions are agreed standardized naming -// patterns for OpenTelemetry things. This package represents the conventions -// as of the v1.12.0 version of the OpenTelemetry specification. -package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/exception.go deleted file mode 100644 index d68927094..000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/exception.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0" - -const ( - // ExceptionEventName is the name of the Span event representing an exception. - ExceptionEventName = "exception" -) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/http.go deleted file mode 100644 index 4b4f3cbaf..000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/http.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0" - -import ( - "net/http" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/semconv/internal" - "go.opentelemetry.io/otel/trace" -) - -// HTTP scheme attributes. -var ( - HTTPSchemeHTTP = HTTPSchemeKey.String("http") - HTTPSchemeHTTPS = HTTPSchemeKey.String("https") -) - -var sc = &internal.SemanticConventions{ - EnduserIDKey: EnduserIDKey, - HTTPClientIPKey: HTTPClientIPKey, - HTTPFlavorKey: HTTPFlavorKey, - HTTPHostKey: HTTPHostKey, - HTTPMethodKey: HTTPMethodKey, - HTTPRequestContentLengthKey: HTTPRequestContentLengthKey, - HTTPRouteKey: HTTPRouteKey, - HTTPSchemeHTTP: HTTPSchemeHTTP, - HTTPSchemeHTTPS: HTTPSchemeHTTPS, - HTTPServerNameKey: HTTPServerNameKey, - HTTPStatusCodeKey: HTTPStatusCodeKey, - HTTPTargetKey: HTTPTargetKey, - HTTPURLKey: HTTPURLKey, - HTTPUserAgentKey: HTTPUserAgentKey, - NetHostIPKey: NetHostIPKey, - NetHostNameKey: NetHostNameKey, - NetHostPortKey: NetHostPortKey, - NetPeerIPKey: NetPeerIPKey, - NetPeerNameKey: NetPeerNameKey, - NetPeerPortKey: NetPeerPortKey, - NetTransportIP: NetTransportIP, - NetTransportOther: NetTransportOther, - NetTransportTCP: NetTransportTCP, - NetTransportUDP: NetTransportUDP, - NetTransportUnix: NetTransportUnix, -} - -// NetAttributesFromHTTPRequest generates attributes of the net -// namespace as specified by the OpenTelemetry specification for a -// span. The network parameter is a string that net.Dial function -// from standard library can understand. -func NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue { - return sc.NetAttributesFromHTTPRequest(network, request) -} - -// EndUserAttributesFromHTTPRequest generates attributes of the -// enduser namespace as specified by the OpenTelemetry specification -// for a span. -func EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { - return sc.EndUserAttributesFromHTTPRequest(request) -} - -// HTTPClientAttributesFromHTTPRequest generates attributes of the -// http namespace as specified by the OpenTelemetry specification for -// a span on the client side. -func HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { - return sc.HTTPClientAttributesFromHTTPRequest(request) -} - -// HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes -// to be used with server-side HTTP metrics. -func HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue { - return sc.HTTPServerMetricAttributesFromHTTPRequest(serverName, request) -} - -// HTTPServerAttributesFromHTTPRequest generates attributes of the -// http namespace as specified by the OpenTelemetry specification for -// a span on the server side. Currently, only basic authentication is -// supported. -func HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue { - return sc.HTTPServerAttributesFromHTTPRequest(serverName, route, request) -} - -// HTTPAttributesFromHTTPStatusCode generates attributes of the http -// namespace as specified by the OpenTelemetry specification for a -// span. 
-func HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue { - return sc.HTTPAttributesFromHTTPStatusCode(code) -} - -// SpanStatusFromHTTPStatusCode generates a status code and a message -// as specified by the OpenTelemetry specification for a span. -func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) { - return internal.SpanStatusFromHTTPStatusCode(code) -} - -// SpanStatusFromHTTPStatusCodeAndSpanKind generates a status code and a message -// as specified by the OpenTelemetry specification for a span. -// Exclude 4xx for SERVER to set the appropriate status. -func SpanStatusFromHTTPStatusCodeAndSpanKind(code int, spanKind trace.SpanKind) (codes.Code, string) { - return internal.SpanStatusFromHTTPStatusCodeAndSpanKind(code, spanKind) -} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/resource.go deleted file mode 100644 index b2155676f..000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/resource.go +++ /dev/null @@ -1,1042 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0" - -import "go.opentelemetry.io/otel/attribute" - -// The web browser in which the application represented by the resource is running. The `browser.*` attributes MUST be used only for resources that represent applications running in a web browser (regardless of whether running on a mobile or desktop device). -const ( - // Array of brand name and version separated by a space - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (navigator.userAgentData.brands). - BrowserBrandsKey = attribute.Key("browser.brands") - // The platform on which the browser is running - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Windows', 'macOS', 'Android' - // Note: This value is intended to be taken from the [UA client hints - // API](https://wicg.github.io/ua-client-hints/#interface) - // (navigator.userAgentData.platform). If unavailable, the legacy - // `navigator.platform` API SHOULD NOT be used instead and this attribute SHOULD - // be left unset in order for the values to be consistent. - // The list of possible values is defined in the [W3C User-Agent Client Hints - // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). - // Note that some (but not all) of these values can overlap with values in the - // [os.type and os.name attributes](./os.md). However, for consistency, the values - // in the `browser.platform` attribute should capture the exact value that the - // user agent provides. 
- BrowserPlatformKey = attribute.Key("browser.platform") - // Full user-agent string provided by the browser - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 - // (KHTML, ' - // 'like Gecko) Chrome/95.0.4638.54 Safari/537.36' - // Note: The user-agent value SHOULD be provided only from browsers that do not - // have a mechanism to retrieve brands and platform individually from the User- - // Agent Client Hints API. To retrieve the value, the legacy `navigator.userAgent` - // API can be used. - BrowserUserAgentKey = attribute.Key("browser.user_agent") -) - -// A cloud environment (e.g. GCP, Azure, AWS) -const ( - // Name of the cloud provider. - // - // Type: Enum - // Required: No - // Stability: stable - CloudProviderKey = attribute.Key("cloud.provider") - // The cloud account ID the resource is assigned to. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '111111111111', 'opentelemetry' - CloudAccountIDKey = attribute.Key("cloud.account.id") - // The geographical region the resource is running. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'us-central1', 'us-east-1' - // Note: Refer to your provider's docs to see the available regions, for example - // [Alibaba Cloud regions](https://www.alibabacloud.com/help/doc- - // detail/40654.htm), [AWS regions](https://aws.amazon.com/about-aws/global- - // infrastructure/regions_az/), [Azure regions](https://azure.microsoft.com/en- - // us/global-infrastructure/geographies/), [Google Cloud - // regions](https://cloud.google.com/about/locations), or [Tencent Cloud - // regions](https://intl.cloud.tencent.com/document/product/213/6091). - CloudRegionKey = attribute.Key("cloud.region") - // Cloud regions often have multiple, isolated locations known as zones to - // increase availability. Availability zone represents the zone where the resource - // is running. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'us-east-1c' - // Note: Availability zones are called "zones" on Alibaba Cloud and Google Cloud. - CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") - // The cloud platform in use. - // - // Type: Enum - // Required: No - // Stability: stable - // Note: The prefix of the service SHOULD match the one specified in - // `cloud.provider`. 
- CloudPlatformKey = attribute.Key("cloud.platform") -) - -var ( - // Alibaba Cloud - CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") - // Amazon Web Services - CloudProviderAWS = CloudProviderKey.String("aws") - // Microsoft Azure - CloudProviderAzure = CloudProviderKey.String("azure") - // Google Cloud Platform - CloudProviderGCP = CloudProviderKey.String("gcp") - // Tencent Cloud - CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") -) - -var ( - // Alibaba Cloud Elastic Compute Service - CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") - // Alibaba Cloud Function Compute - CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") - // AWS Elastic Compute Cloud - CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") - // AWS Elastic Container Service - CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") - // AWS Elastic Kubernetes Service - CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") - // AWS Lambda - CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") - // AWS Elastic Beanstalk - CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") - // AWS App Runner - CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") - // Azure Virtual Machines - CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") - // Azure Container Instances - CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") - // Azure Kubernetes Service - CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") - // Azure Functions - CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") - // Azure App Service - CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") - // Google Cloud Compute Engine (GCE) - CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") - // Google Cloud Run - CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") - // Google Cloud Kubernetes Engine (GKE) - CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") - // Google Cloud Functions (GCF) - CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") - // Google Cloud App Engine (GAE) - CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") - // Tencent Cloud Cloud Virtual Machine (CVM) - CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") - // Tencent Cloud Elastic Kubernetes Service (EKS) - CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") - // Tencent Cloud Serverless Cloud Function (SCF) - CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") -) - -// Resources used by AWS Elastic Container Service (ECS). -const ( - // The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws. - // amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'arn:aws:ecs:us- - // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' - AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") - // The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/develo - // perguide/clusters.html). 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") - // The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/l - // aunch_types.html) for an ECS task. - // - // Type: Enum - // Required: No - // Stability: stable - AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") - // The ARN of an [ECS task definition](https://docs.aws.amazon.com/AmazonECS/lates - // t/developerguide/task_definitions.html). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'arn:aws:ecs:us- - // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' - AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") - // The task definition family this task definition is a member of. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry-family' - AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") - // The revision for this task definition. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '8', '26' - AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") -) - -var ( - // ec2 - AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") - // fargate - AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") -) - -// Resources used by AWS Elastic Kubernetes Service (EKS). -const ( - // The ARN of an EKS cluster. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' - AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") -) - -// Resources specific to Amazon Web Services. -const ( - // The name(s) of the AWS log group(s) an application is writing to. - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: '/aws/lambda/my-function', 'opentelemetry-service' - // Note: Multiple log groups must be supported for cases like multi-container - // applications, where a single application has sidecar containers, and each write - // to their own log group. - AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") - // The Amazon Resource Name(s) (ARN) of the AWS log group(s). - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' - // Note: See the [log group ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- - // access-control-overview-cwl.html#CWL_ARN_Format). - AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") - // The name(s) of the AWS log stream(s) an application is writing to. - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") - // The ARN(s) of the AWS log stream(s). - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log- - // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' - // Note: See the [log stream ARN format - // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- - // access-control-overview-cwl.html#CWL_ARN_Format). One log group can contain - // several log streams, so these ARNs necessarily identify both a log group and a - // log stream. 
- AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") -) - -// A container instance. -const ( - // Container name used by container runtime. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry-autoconf' - ContainerNameKey = attribute.Key("container.name") - // Container ID. Usually a UUID, as for example used to [identify Docker - // containers](https://docs.docker.com/engine/reference/run/#container- - // identification). The UUID might be abbreviated. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'a3bf90e006b2' - ContainerIDKey = attribute.Key("container.id") - // The container runtime managing this container. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'docker', 'containerd', 'rkt' - ContainerRuntimeKey = attribute.Key("container.runtime") - // Name of the image the container was built on. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'gcr.io/opentelemetry/operator' - ContainerImageNameKey = attribute.Key("container.image.name") - // Container image tag. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '0.1' - ContainerImageTagKey = attribute.Key("container.image.tag") -) - -// The software deployment. -const ( - // Name of the [deployment - // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka - // deployment tier). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'staging', 'production' - DeploymentEnvironmentKey = attribute.Key("deployment.environment") -) - -// The device on which the process represented by this resource is running. -const ( - // A unique identifier representing the device - // - // Type: string - // Required: No - // Stability: stable - // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' - // Note: The device identifier MUST only be defined using the values outlined - // below. This value is not an advertising identifier and MUST NOT be used as - // such. On iOS (Swift or Objective-C), this value MUST be equal to the [vendor id - // entifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-iden - // tifierforvendor). On Android (Java or Kotlin), this value MUST be equal to the - // Firebase Installation ID or a globally unique UUID which is persisted across - // sessions in your application. More information can be found - // [here](https://developer.android.com/training/articles/user-data-ids) on best - // practices and exact implementation details. Caution should be taken when - // storing personal data or anything which can identify a user. GDPR and data - // protection laws may apply, ensure you do your own due diligence. - DeviceIDKey = attribute.Key("device.id") - // The model identifier for the device - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'iPhone3,4', 'SM-G920F' - // Note: It's recommended this value represents a machine readable version of the - // model identifier rather than the market or consumer-friendly name of the - // device. - DeviceModelIdentifierKey = attribute.Key("device.model.identifier") - // The marketing name for the device model - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' - // Note: It's recommended this value represents a human readable version of the - // device model rather than a machine readable alternative. 
- DeviceModelNameKey = attribute.Key("device.model.name") - // The name of the device manufacturer - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Apple', 'Samsung' - // Note: The Android OS provides this field via - // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). - // iOS apps SHOULD hardcode the value `Apple`. - DeviceManufacturerKey = attribute.Key("device.manufacturer") -) - -// A serverless instance. -const ( - // The name of the single function that this runtime instance executes. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'my-function', 'myazurefunctionapp/some-function-name' - // Note: This is the name of the function as configured/deployed on the FaaS - // platform and is usually different from the name of the callback - // function (which may be stored in the - // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span- - // general.md#source-code-attributes) - // span attributes). - - // For some cloud providers, the above definition is ambiguous. The following - // definition of function name MUST be used for this attribute - // (and consequently the span name) for the listed cloud providers/products: - - // * **Azure:** The full name `/`, i.e., function app name - // followed by a forward slash followed by the function name (this form - // can also be seen in the resource JSON for the function). - // This means that a span attribute MUST be used, as an Azure function - // app can host multiple functions that would usually share - // a TracerProvider (see also the `faas.id` attribute). - FaaSNameKey = attribute.Key("faas.name") - // The unique ID of the single function that this runtime instance executes. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function' - // Note: On some cloud providers, it may not be possible to determine the full ID - // at startup, - // so consider setting `faas.id` as a span attribute instead. - - // The exact value to use for `faas.id` depends on the cloud provider: - - // * **AWS Lambda:** The function - // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and- - // namespaces.html). - // Take care not to use the "invoked ARN" directly but replace any - // [alias suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration- - // aliases.html) - // with the resolved function version, as the same runtime instance may be - // invokable with - // multiple different aliases. - // * **GCP:** The [URI of the resource](https://cloud.google.com/iam/docs/full- - // resource-names) - // * **Azure:** The [Fully Qualified Resource ID](https://docs.microsoft.com/en- - // us/rest/api/resources/resources/get-by-id) of the invoked function, - // *not* the function app, having the form - // `/subscriptions//resourceGroups//providers/Microsoft.We - // b/sites//functions/`. - // This means that a span attribute MUST be used, as an Azure function app can - // host multiple functions that would usually share - // a TracerProvider. - FaaSIDKey = attribute.Key("faas.id") - // The immutable version of the function being executed. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: '26', 'pinkfroid-00002' - // Note: Depending on the cloud provider and platform, use: - - // * **AWS Lambda:** The [function - // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration- - // versions.html) - // (an integer represented as a decimal string). - // * **Google Cloud Run:** The - // [revision](https://cloud.google.com/run/docs/managing/revisions) - // (i.e., the function name plus the revision suffix). - // * **Google Cloud Functions:** The value of the - // [`K_REVISION` environment - // variable](https://cloud.google.com/functions/docs/env- - // var#runtime_environment_variables_set_automatically). - // * **Azure Functions:** Not applicable. Do not set this attribute. - FaaSVersionKey = attribute.Key("faas.version") - // The execution environment ID as a string, that will be potentially reused for - // other invocations to the same function/function version. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' - // Note: * **AWS Lambda:** Use the (full) log stream name. - FaaSInstanceKey = attribute.Key("faas.instance") - // The amount of memory available to the serverless function in MiB. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 128 - // Note: It's recommended to set this attribute since e.g. too little memory can - // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, - // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this - // information. - FaaSMaxMemoryKey = attribute.Key("faas.max_memory") -) - -// A host is defined as a general computing instance. -const ( - // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud - // provider. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry-test' - HostIDKey = attribute.Key("host.id") - // Name of the host. On Unix systems, it may contain what the hostname command - // returns, or the fully qualified hostname, or another name specified by the - // user. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry-test' - HostNameKey = attribute.Key("host.name") - // Type of host. For Cloud, this must be the machine type. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'n1-standard-1' - HostTypeKey = attribute.Key("host.type") - // The CPU architecture the host system is running on. - // - // Type: Enum - // Required: No - // Stability: stable - HostArchKey = attribute.Key("host.arch") - // Name of the VM image or OS install the host was instantiated from. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' - HostImageNameKey = attribute.Key("host.image.name") - // VM image ID. For Cloud, this value is from the provider. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'ami-07b06b442921831e5' - HostImageIDKey = attribute.Key("host.image.id") - // The version string of the VM image as defined in [Version - // Attributes](README.md#version-attributes). 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: '0.1' - HostImageVersionKey = attribute.Key("host.image.version") -) - -var ( - // AMD64 - HostArchAMD64 = HostArchKey.String("amd64") - // ARM32 - HostArchARM32 = HostArchKey.String("arm32") - // ARM64 - HostArchARM64 = HostArchKey.String("arm64") - // Itanium - HostArchIA64 = HostArchKey.String("ia64") - // 32-bit PowerPC - HostArchPPC32 = HostArchKey.String("ppc32") - // 64-bit PowerPC - HostArchPPC64 = HostArchKey.String("ppc64") - // IBM z/Architecture - HostArchS390x = HostArchKey.String("s390x") - // 32-bit x86 - HostArchX86 = HostArchKey.String("x86") -) - -// A Kubernetes Cluster. -const ( - // The name of the cluster. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry-cluster' - K8SClusterNameKey = attribute.Key("k8s.cluster.name") -) - -// A Kubernetes Node object. -const ( - // The name of the Node. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'node-1' - K8SNodeNameKey = attribute.Key("k8s.node.name") - // The UID of the Node. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' - K8SNodeUIDKey = attribute.Key("k8s.node.uid") -) - -// A Kubernetes Namespace. -const ( - // The name of the namespace that the pod is running in. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'default' - K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") -) - -// A Kubernetes Pod object. -const ( - // The UID of the Pod. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SPodUIDKey = attribute.Key("k8s.pod.uid") - // The name of the Pod. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry-pod-autoconf' - K8SPodNameKey = attribute.Key("k8s.pod.name") -) - -// A container in a [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). -const ( - // The name of the Container from Pod specification, must be unique within a Pod. - // Container runtime usually uses different globally unique name - // (`container.name`). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'redis' - K8SContainerNameKey = attribute.Key("k8s.container.name") - // Number of times the container was restarted. This attribute can be used to - // identify a particular container (running or stopped) within a container spec. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 0, 2 - K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") -) - -// A Kubernetes ReplicaSet object. -const ( - // The UID of the ReplicaSet. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") - // The name of the ReplicaSet. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry' - K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") -) - -// A Kubernetes Deployment object. -const ( - // The UID of the Deployment. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") - // The name of the Deployment. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry' - K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") -) - -// A Kubernetes StatefulSet object. -const ( - // The UID of the StatefulSet. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") - // The name of the StatefulSet. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry' - K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") -) - -// A Kubernetes DaemonSet object. -const ( - // The UID of the DaemonSet. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") - // The name of the DaemonSet. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry' - K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") -) - -// A Kubernetes Job object. -const ( - // The UID of the Job. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SJobUIDKey = attribute.Key("k8s.job.uid") - // The name of the Job. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry' - K8SJobNameKey = attribute.Key("k8s.job.name") -) - -// A Kubernetes CronJob object. -const ( - // The UID of the CronJob. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' - K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") - // The name of the CronJob. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry' - K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") -) - -// The operating system (OS) on which the process represented by this resource is running. -const ( - // The operating system type. - // - // Type: Enum - // Required: Always - // Stability: stable - OSTypeKey = attribute.Key("os.type") - // Human readable (not intended to be parsed) OS version information, like e.g. - // reported by `ver` or `lsb_release -a` commands. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS' - OSDescriptionKey = attribute.Key("os.description") - // Human readable operating system name. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'iOS', 'Android', 'Ubuntu' - OSNameKey = attribute.Key("os.name") - // The version string of the operating system as defined in [Version - // Attributes](../../resource/semantic_conventions/README.md#version-attributes). 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: '14.2.1', '18.04.1' - OSVersionKey = attribute.Key("os.version") -) - -var ( - // Microsoft Windows - OSTypeWindows = OSTypeKey.String("windows") - // Linux - OSTypeLinux = OSTypeKey.String("linux") - // Apple Darwin - OSTypeDarwin = OSTypeKey.String("darwin") - // FreeBSD - OSTypeFreeBSD = OSTypeKey.String("freebsd") - // NetBSD - OSTypeNetBSD = OSTypeKey.String("netbsd") - // OpenBSD - OSTypeOpenBSD = OSTypeKey.String("openbsd") - // DragonFly BSD - OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") - // HP-UX (Hewlett Packard Unix) - OSTypeHPUX = OSTypeKey.String("hpux") - // AIX (Advanced Interactive eXecutive) - OSTypeAIX = OSTypeKey.String("aix") - // SunOS, Oracle Solaris - OSTypeSolaris = OSTypeKey.String("solaris") - // IBM z/OS - OSTypeZOS = OSTypeKey.String("z_os") -) - -// An operating system process. -const ( - // Process identifier (PID). - // - // Type: int - // Required: No - // Stability: stable - // Examples: 1234 - ProcessPIDKey = attribute.Key("process.pid") - // The name of the process executable. On Linux based systems, can be set to the - // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name of - // `GetProcessImageFileNameW`. - // - // Type: string - // Required: See below - // Stability: stable - // Examples: 'otelcol' - ProcessExecutableNameKey = attribute.Key("process.executable.name") - // The full path to the process executable. On Linux based systems, can be set to - // the target of `proc/[pid]/exe`. On Windows, can be set to the result of - // `GetProcessImageFileNameW`. - // - // Type: string - // Required: See below - // Stability: stable - // Examples: '/usr/bin/cmd/otelcol' - ProcessExecutablePathKey = attribute.Key("process.executable.path") - // The command used to launch the process (i.e. the command name). On Linux based - // systems, can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, - // can be set to the first parameter extracted from `GetCommandLineW`. - // - // Type: string - // Required: See below - // Stability: stable - // Examples: 'cmd/otelcol' - ProcessCommandKey = attribute.Key("process.command") - // The full command used to launch the process as a single string representing the - // full command. On Windows, can be set to the result of `GetCommandLineW`. Do not - // set this if you have to assemble it just for monitoring; use - // `process.command_args` instead. - // - // Type: string - // Required: See below - // Stability: stable - // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' - ProcessCommandLineKey = attribute.Key("process.command_line") - // All the command arguments (including the command/executable itself) as received - // by the process. On Linux-based systems (and some other Unixoid systems - // supporting procfs), can be set according to the list of null-delimited strings - // extracted from `proc/[pid]/cmdline`. For libc-based executables, this would be - // the full argv vector passed to `main`. - // - // Type: string[] - // Required: See below - // Stability: stable - // Examples: 'cmd/otecol', '--config=config.yaml' - ProcessCommandArgsKey = attribute.Key("process.command_args") - // The username of the user that owns the process. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'root' - ProcessOwnerKey = attribute.Key("process.owner") -) - -// The single (language) runtime instance which is monitored. 
-const ( - // The name of the runtime of this process. For compiled native binaries, this - // SHOULD be the name of the compiler. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'OpenJDK Runtime Environment' - ProcessRuntimeNameKey = attribute.Key("process.runtime.name") - // The version of the runtime of this process, as returned by the runtime without - // modification. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '14.0.2' - ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") - // An additional description about the runtime of the process, for example a - // specific vendor customization of the runtime environment. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' - ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") -) - -// A service instance. -const ( - // Logical name of the service. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'shoppingcart' - // Note: MUST be the same for all instances of horizontally scaled services. If - // the value was not specified, SDKs MUST fallback to `unknown_service:` - // concatenated with [`process.executable.name`](process.md#process), e.g. - // `unknown_service:bash`. If `process.executable.name` is not available, the - // value MUST be set to `unknown_service`. - ServiceNameKey = attribute.Key("service.name") - // A namespace for `service.name`. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Shop' - // Note: A string value having a meaning that helps to distinguish a group of - // services, for example the team name that owns a group of services. - // `service.name` is expected to be unique within the same namespace. If - // `service.namespace` is not specified in the Resource then `service.name` is - // expected to be unique for all services that have no explicit namespace defined - // (so the empty/unspecified namespace is simply one more valid namespace). Zero- - // length namespace string is assumed equal to unspecified namespace. - ServiceNamespaceKey = attribute.Key("service.namespace") - // The string ID of the service instance. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '627cc493-f310-47de-96bd-71410b7dec09' - // Note: MUST be unique for each instance of the same - // `service.namespace,service.name` pair (in other words - // `service.namespace,service.name,service.instance.id` triplet MUST be globally - // unique). The ID helps to distinguish instances of the same service that exist - // at the same time (e.g. instances of a horizontally scaled service). It is - // preferable for the ID to be persistent and stay the same for the lifetime of - // the service instance, however it is acceptable that the ID is ephemeral and - // changes during important lifetime events for the service (e.g. service - // restarts). If the service has no inherent unique ID that can be used as the - // value of this attribute it is recommended to generate a random Version 1 or - // Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use - // Version 5, see RFC 4122 for more recommendations). - ServiceInstanceIDKey = attribute.Key("service.instance.id") - // The version string of the service API or implementation. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: '2.0.0' - ServiceVersionKey = attribute.Key("service.version") -) - -// The telemetry SDK used to capture data recorded by the instrumentation libraries. -const ( - // The name of the telemetry SDK as defined above. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'opentelemetry' - TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") - // The language of the telemetry SDK. - // - // Type: Enum - // Required: No - // Stability: stable - TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") - // The version string of the telemetry SDK. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '1.2.3' - TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") - // The version string of the auto instrumentation agent, if used. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '1.2.3' - TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") -) - -var ( - // cpp - TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") - // dotnet - TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") - // erlang - TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") - // go - TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") - // java - TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") - // nodejs - TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") - // php - TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") - // python - TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") - // ruby - TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") - // webjs - TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") - // swift - TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") -) - -// Resource describing the packaged software running the application code. Web engines are typically executed using process.runtime. -const ( - // The name of the web engine. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'WildFly' - WebEngineNameKey = attribute.Key("webengine.name") - // The version of the web engine. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '21.0.0' - WebEngineVersionKey = attribute.Key("webengine.version") - // Additional description of the web engine (e.g. detailed version and edition - // information). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final' - WebEngineDescriptionKey = attribute.Key("webengine.description") -) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/schema.go deleted file mode 100644 index 2f2a019e4..000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/schema.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0" - -// SchemaURL is the schema URL that matches the version of the semantic conventions -// that this package defines. Semconv packages starting from v1.4.0 must declare -// non-empty schema URL in the form https://opentelemetry.io/schemas/ -const SchemaURL = "https://opentelemetry.io/schemas/1.12.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/trace.go deleted file mode 100644 index 047d8e95c..000000000 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/trace.go +++ /dev/null @@ -1,1704 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated from semantic convention specification. DO NOT EDIT. - -package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0" - -import "go.opentelemetry.io/otel/attribute" - -// Span attributes used by AWS Lambda (in addition to general `faas` attributes). -const ( - // The full invoked ARN as provided on the `Context` passed to the function - // (`Lambda-Runtime-Invoked-Function-ARN` header on the `/runtime/invocation/next` - // applicable). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' - // Note: This may be different from `faas.id` if an alias is involved. - AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") -) - -// This document defines attributes for CloudEvents. CloudEvents is a specification on how to define event data in a standard way. These attributes can be attached to spans when performing operations with CloudEvents, regardless of the protocol being used. -const ( - // The [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec - // .md#id) uniquely identifies the event. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' - CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") - // The [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.m - // d#source-1) identifies the context in which an event happened. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'https://github.com/cloudevents', '/cloudevents/spec/pull/123', 'my- - // service' - CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") - // The [version of the CloudEvents specification](https://github.com/cloudevents/s - // pec/blob/v1.0.2/cloudevents/spec.md#specversion) which the event uses. 
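For orientation only, a minimal sketch (not part of the diff itself) of how the SchemaURL constant and the resource attribute keys removed above are typically consumed; it assumes only the public go.opentelemetry.io/otel/attribute and go.opentelemetry.io/otel/sdk/resource APIs, with example values taken from the comments above.

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	// Build an SDK resource tagged with the semconv v1.12.0 schema URL and a
	// couple of the resource attributes documented above.
	res := resource.NewWithAttributes(
		"https://opentelemetry.io/schemas/1.12.0", // value of the removed SchemaURL constant
		attribute.Key("service.name").String("shoppingcart"),
		attribute.Key("service.version").String("2.0.0"),
	)
	fmt.Println(res.SchemaURL(), res.Attributes())
}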
- // - // Type: string - // Required: Always - // Stability: stable - // Examples: '1.0' - CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") - // The [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/sp - // ec.md#type) contains a value describing the type of event related to the - // originating occurrence. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'com.github.pull_request.opened', 'com.example.object.deleted.v2' - CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") - // The [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec. - // md#subject) of the event in the context of the event producer (identified by - // source). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'mynewfile.jpg' - CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") -) - -// This document defines semantic conventions for the OpenTracing Shim -const ( - // Parent-child Reference type - // - // Type: Enum - // Required: No - // Stability: stable - // Note: The causal relationship between a child Span and a parent Span. - OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") -) - -var ( - // The parent Span depends on the child Span in some capacity - OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") - // The parent Span does not depend in any way on the result of the child Span - OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") -) - -// This document defines the attributes used to perform database client calls. -const ( - // An identifier for the database management system (DBMS) product being used. See - // below for a list of well-known identifiers. - // - // Type: Enum - // Required: Always - // Stability: stable - DBSystemKey = attribute.Key("db.system") - // The connection string used to connect to the database. It is recommended to - // remove embedded credentials. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' - DBConnectionStringKey = attribute.Key("db.connection_string") - // Username for accessing the database. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'readonly_user', 'reporting_user' - DBUserKey = attribute.Key("db.user") - // The fully-qualified class name of the [Java Database Connectivity - // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver - // used to connect. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'org.postgresql.Driver', - // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' - DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") - // This attribute is used to report the name of the database being accessed. For - // commands that switch the database, this should be set to the target database - // (even if the command fails). - // - // Type: string - // Required: Required, if applicable. - // Stability: stable - // Examples: 'customers', 'main' - // Note: In some SQL databases, the database name to be used is called "schema - // name". In case there are multiple layers that could be considered for database - // name (e.g. Oracle instance name and schema name), the database name to be used - // is the more specific layer (e.g. Oracle schema name). - DBNameKey = attribute.Key("db.name") - // The database statement being executed. 
- // - // Type: string - // Required: Required if applicable and not explicitly disabled via - // instrumentation configuration. - // Stability: stable - // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' - // Note: The value may be sanitized to exclude sensitive information. - DBStatementKey = attribute.Key("db.statement") - // The name of the operation being executed, e.g. the [MongoDB command - // name](https://docs.mongodb.com/manual/reference/command/#database-operations) - // such as `findAndModify`, or the SQL keyword. - // - // Type: string - // Required: Required, if `db.statement` is not applicable. - // Stability: stable - // Examples: 'findAndModify', 'HMSET', 'SELECT' - // Note: When setting this to an SQL keyword, it is not recommended to attempt any - // client-side parsing of `db.statement` just to get this property, but it should - // be set if the operation name is provided by the library being instrumented. If - // the SQL statement has an ambiguous operation, or performs more than one - // operation, this value may be omitted. - DBOperationKey = attribute.Key("db.operation") -) - -var ( - // Some other SQL database. Fallback only. See notes - DBSystemOtherSQL = DBSystemKey.String("other_sql") - // Microsoft SQL Server - DBSystemMSSQL = DBSystemKey.String("mssql") - // MySQL - DBSystemMySQL = DBSystemKey.String("mysql") - // Oracle Database - DBSystemOracle = DBSystemKey.String("oracle") - // IBM DB2 - DBSystemDB2 = DBSystemKey.String("db2") - // PostgreSQL - DBSystemPostgreSQL = DBSystemKey.String("postgresql") - // Amazon Redshift - DBSystemRedshift = DBSystemKey.String("redshift") - // Apache Hive - DBSystemHive = DBSystemKey.String("hive") - // Cloudscape - DBSystemCloudscape = DBSystemKey.String("cloudscape") - // HyperSQL DataBase - DBSystemHSQLDB = DBSystemKey.String("hsqldb") - // Progress Database - DBSystemProgress = DBSystemKey.String("progress") - // SAP MaxDB - DBSystemMaxDB = DBSystemKey.String("maxdb") - // SAP HANA - DBSystemHanaDB = DBSystemKey.String("hanadb") - // Ingres - DBSystemIngres = DBSystemKey.String("ingres") - // FirstSQL - DBSystemFirstSQL = DBSystemKey.String("firstsql") - // EnterpriseDB - DBSystemEDB = DBSystemKey.String("edb") - // InterSystems Caché - DBSystemCache = DBSystemKey.String("cache") - // Adabas (Adaptable Database System) - DBSystemAdabas = DBSystemKey.String("adabas") - // Firebird - DBSystemFirebird = DBSystemKey.String("firebird") - // Apache Derby - DBSystemDerby = DBSystemKey.String("derby") - // FileMaker - DBSystemFilemaker = DBSystemKey.String("filemaker") - // Informix - DBSystemInformix = DBSystemKey.String("informix") - // InstantDB - DBSystemInstantDB = DBSystemKey.String("instantdb") - // InterBase - DBSystemInterbase = DBSystemKey.String("interbase") - // MariaDB - DBSystemMariaDB = DBSystemKey.String("mariadb") - // Netezza - DBSystemNetezza = DBSystemKey.String("netezza") - // Pervasive PSQL - DBSystemPervasive = DBSystemKey.String("pervasive") - // PointBase - DBSystemPointbase = DBSystemKey.String("pointbase") - // SQLite - DBSystemSqlite = DBSystemKey.String("sqlite") - // Sybase - DBSystemSybase = DBSystemKey.String("sybase") - // Teradata - DBSystemTeradata = DBSystemKey.String("teradata") - // Vertica - DBSystemVertica = DBSystemKey.String("vertica") - // H2 - DBSystemH2 = DBSystemKey.String("h2") - // ColdFusion IMQ - DBSystemColdfusion = DBSystemKey.String("coldfusion") - // Apache Cassandra - DBSystemCassandra = DBSystemKey.String("cassandra") - // Apache HBase - DBSystemHBase = 
DBSystemKey.String("hbase") - // MongoDB - DBSystemMongoDB = DBSystemKey.String("mongodb") - // Redis - DBSystemRedis = DBSystemKey.String("redis") - // Couchbase - DBSystemCouchbase = DBSystemKey.String("couchbase") - // CouchDB - DBSystemCouchDB = DBSystemKey.String("couchdb") - // Microsoft Azure Cosmos DB - DBSystemCosmosDB = DBSystemKey.String("cosmosdb") - // Amazon DynamoDB - DBSystemDynamoDB = DBSystemKey.String("dynamodb") - // Neo4j - DBSystemNeo4j = DBSystemKey.String("neo4j") - // Apache Geode - DBSystemGeode = DBSystemKey.String("geode") - // Elasticsearch - DBSystemElasticsearch = DBSystemKey.String("elasticsearch") - // Memcached - DBSystemMemcached = DBSystemKey.String("memcached") - // CockroachDB - DBSystemCockroachdb = DBSystemKey.String("cockroachdb") -) - -// Connection-level attributes for Microsoft SQL Server -const ( - // The Microsoft SQL Server [instance name](https://docs.microsoft.com/en- - // us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) - // connecting to. This name is used to determine the port of a named instance. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'MSSQLSERVER' - // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no longer - // required (but still recommended if non-standard). - DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") -) - -// Call-level attributes for Cassandra -const ( - // The fetch size used for paging, i.e. how many rows will be returned at once. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 5000 - DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") - // The consistency level of the query. Based on consistency values from - // [CQL](https://docs.datastax.com/en/cassandra- - // oss/3.0/cassandra/dml/dmlConfigConsistency.html). - // - // Type: Enum - // Required: No - // Stability: stable - DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") - // The name of the primary table that the operation is acting upon, including the - // keyspace name (if applicable). - // - // Type: string - // Required: Recommended if available. - // Stability: stable - // Examples: 'mytable' - // Note: This mirrors the db.sql.table attribute but references cassandra rather - // than sql. It is not recommended to attempt any client-side parsing of - // `db.statement` just to get this property, but it should be set if it is - // provided by the library being instrumented. If the operation is acting upon an - // anonymous table, or more than one table, this value MUST NOT be set. - DBCassandraTableKey = attribute.Key("db.cassandra.table") - // Whether or not the query is idempotent. - // - // Type: boolean - // Required: No - // Stability: stable - DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") - // The number of times a query was speculatively executed. Not set or `0` if the - // query was not executed speculatively. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 0, 2 - DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") - // The ID of the coordinating node for a query. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' - DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") - // The data center of the coordinating node for a query. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: 'us-west-2' - DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") -) - -var ( - // all - DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") - // each_quorum - DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") - // quorum - DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") - // local_quorum - DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") - // one - DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") - // two - DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") - // three - DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") - // local_one - DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") - // any - DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") - // serial - DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") - // local_serial - DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") -) - -// Call-level attributes for Redis -const ( - // The index of the database being accessed as used in the [`SELECT` - // command](https://redis.io/commands/select), provided as an integer. To be used - // instead of the generic `db.name` attribute. - // - // Type: int - // Required: Required, if other than the default database (`0`). - // Stability: stable - // Examples: 0, 1, 15 - DBRedisDBIndexKey = attribute.Key("db.redis.database_index") -) - -// Call-level attributes for MongoDB -const ( - // The collection being accessed within the database stated in `db.name`. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'customers', 'products' - DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") -) - -// Call-level attributes for SQL databases -const ( - // The name of the primary table that the operation is acting upon, including the - // database name (if applicable). - // - // Type: string - // Required: Recommended if available. - // Stability: stable - // Examples: 'public.users', 'customers' - // Note: It is not recommended to attempt any client-side parsing of - // `db.statement` just to get this property, but it should be set if it is - // provided by the library being instrumented. If the operation is acting upon an - // anonymous table, or more than one table, this value MUST NOT be set. - DBSQLTableKey = attribute.Key("db.sql.table") -) - -// This document defines the attributes used to report a single exception associated with a span. -const ( - // The type of the exception (its fully-qualified class name, if applicable). The - // dynamic type of the exception should be preferred over the static type in - // languages that support it. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'java.net.ConnectException', 'OSError' - ExceptionTypeKey = attribute.Key("exception.type") - // The exception message. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly" - ExceptionMessageKey = attribute.Key("exception.message") - // A stacktrace as a string in the natural representation for the language - // runtime. 
The representation is to be determined and documented by each language - // SIG. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test - // exception\\n at ' - // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' - // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' - // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' - ExceptionStacktraceKey = attribute.Key("exception.stacktrace") - // SHOULD be set to true if the exception event is recorded at a point where it is - // known that the exception is escaping the scope of the span. - // - // Type: boolean - // Required: No - // Stability: stable - // Note: An exception is considered to have escaped (or left) the scope of a span, - // if that span is ended while the exception is still logically "in flight". - // This may be actually "in flight" in some languages (e.g. if the exception - // is passed to a Context manager's `__exit__` method in Python) but will - // usually be caught at the point of recording the exception in most languages. - - // It is usually not possible to determine at the point where an exception is - // thrown - // whether it will escape the scope of a span. - // However, it is trivial to know that an exception - // will escape, if one checks for an active exception just before ending the span, - // as done in the [example above](#recording-an-exception). - - // It follows that an exception may still escape the scope of the span - // even if the `exception.escaped` attribute was not set or set to false, - // since the event might have been recorded at a time where it was not - // clear whether the exception will escape. - ExceptionEscapedKey = attribute.Key("exception.escaped") -) - -// This semantic convention describes an instance of a function that runs without provisioning or managing of servers (also known as serverless functions or Function as a Service (FaaS)) with spans. -const ( - // Type of the trigger which caused this function execution. - // - // Type: Enum - // Required: No - // Stability: stable - // Note: For the server/consumer span on the incoming side, - // `faas.trigger` MUST be set. - - // Clients invoking FaaS instances usually cannot set `faas.trigger`, - // since they would typically need to look in the payload to determine - // the event type. If clients set it, it should be the same as the - // trigger that corresponding incoming would have (i.e., this has - // nothing to do with the underlying transport used to make the API - // call to invoke the lambda, which is often HTTP). - FaaSTriggerKey = attribute.Key("faas.trigger") - // The execution ID of the current function execution. 
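A similar sketch (again an illustration, not patch content) of how the exception.* keys above are usually attached to a span event named "exception", assuming the public go.opentelemetry.io/otel/trace API; the no-op span returned by SpanFromContext stands in for a real one.

package main

import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	// SpanFromContext yields a no-op span here; a real tracer would supply one.
	span := trace.SpanFromContext(context.Background())
	span.AddEvent("exception", trace.WithAttributes(
		attribute.Key("exception.type").String("OSError"),
		attribute.Key("exception.message").String("Division by zero"),
		attribute.Key("exception.escaped").Bool(true),
	))
}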
- // - // Type: string - // Required: No - // Stability: stable - // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' - FaaSExecutionKey = attribute.Key("faas.execution") -) - -var ( - // A response to some data source operation such as a database or filesystem read/write - FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") - // To provide an answer to an inbound HTTP request - FaaSTriggerHTTP = FaaSTriggerKey.String("http") - // A function is set to be executed when messages are sent to a messaging system - FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") - // A function is scheduled to be executed regularly - FaaSTriggerTimer = FaaSTriggerKey.String("timer") - // If none of the others apply - FaaSTriggerOther = FaaSTriggerKey.String("other") -) - -// Semantic Convention for FaaS triggered as a response to some data source operation such as a database or filesystem read/write. -const ( - // The name of the source on which the triggering operation was performed. For - // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos - // DB to the database name. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'myBucketName', 'myDBName' - FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") - // Describes the type of the operation that was performed on the data. - // - // Type: Enum - // Required: Always - // Stability: stable - FaaSDocumentOperationKey = attribute.Key("faas.document.operation") - // A string containing the time when the data was accessed in the [ISO - // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed - // in [UTC](https://www.w3.org/TR/NOTE-datetime). - // - // Type: string - // Required: Always - // Stability: stable - // Examples: '2020-01-23T13:47:06Z' - FaaSDocumentTimeKey = attribute.Key("faas.document.time") - // The document name/table subjected to the operation. For example, in Cloud - // Storage or S3 is the name of the file, and in Cosmos DB the table name. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'myFile.txt', 'myTableName' - FaaSDocumentNameKey = attribute.Key("faas.document.name") -) - -var ( - // When a new object is created - FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") - // When an object is modified - FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") - // When an object is deleted - FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") -) - -// Semantic Convention for FaaS scheduled to be executed regularly. -const ( - // A string containing the function invocation time in the [ISO - // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed - // in [UTC](https://www.w3.org/TR/NOTE-datetime). - // - // Type: string - // Required: Always - // Stability: stable - // Examples: '2020-01-23T13:47:06Z' - FaaSTimeKey = attribute.Key("faas.time") - // A string containing the schedule period as [Cron Expression](https://docs.oracl - // e.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). - // - // Type: string - // Required: No - // Stability: stable - // Examples: '0/5 * * * ? *' - FaaSCronKey = attribute.Key("faas.cron") -) - -// Contains additional attributes for incoming FaaS spans. -const ( - // A boolean that is true if the serverless function is executed for the first - // time (aka cold-start). 
- // - // Type: boolean - // Required: No - // Stability: stable - FaaSColdstartKey = attribute.Key("faas.coldstart") -) - -// Contains additional attributes for outgoing FaaS spans. -const ( - // The name of the invoked function. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'my-function' - // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked - // function. - FaaSInvokedNameKey = attribute.Key("faas.invoked_name") - // The cloud provider of the invoked function. - // - // Type: Enum - // Required: Always - // Stability: stable - // Note: SHOULD be equal to the `cloud.provider` resource attribute of the invoked - // function. - FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") - // The cloud region of the invoked function. - // - // Type: string - // Required: For some cloud providers, like AWS or GCP, the region in which a - // function is hosted is essential to uniquely identify the function and also part - // of its endpoint. Since it's part of the endpoint being called, the region is - // always known to clients. In these cases, `faas.invoked_region` MUST be set - // accordingly. If the region is unknown to the client or not required for - // identifying the invoked function, setting `faas.invoked_region` is optional. - // Stability: stable - // Examples: 'eu-central-1' - // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked - // function. - FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") -) - -var ( - // Alibaba Cloud - FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") - // Amazon Web Services - FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") - // Microsoft Azure - FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") - // Google Cloud Platform - FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") - // Tencent Cloud - FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") -) - -// These attributes may be used for any network related operation. -const ( - // Transport protocol used. See note below. - // - // Type: Enum - // Required: No - // Stability: stable - NetTransportKey = attribute.Key("net.transport") - // Remote address of the peer (dotted decimal for IPv4 or - // [RFC5952](https://tools.ietf.org/html/rfc5952) for IPv6) - // - // Type: string - // Required: No - // Stability: stable - // Examples: '127.0.0.1' - NetPeerIPKey = attribute.Key("net.peer.ip") - // Remote port number. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 80, 8080, 443 - NetPeerPortKey = attribute.Key("net.peer.port") - // Remote hostname or similar, see note below. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'example.com' - // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an extra - // DNS lookup. - NetPeerNameKey = attribute.Key("net.peer.name") - // Like `net.peer.ip` but for the host IP. Useful in case of a multi-IP host. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '192.168.0.1' - NetHostIPKey = attribute.Key("net.host.ip") - // Like `net.peer.port` but for the host port. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 35555 - NetHostPortKey = attribute.Key("net.host.port") - // Local hostname or similar, see note below. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: 'localhost' - NetHostNameKey = attribute.Key("net.host.name") - // The internet connection type currently being used by the host. - // - // Type: Enum - // Required: No - // Stability: stable - // Examples: 'wifi' - NetHostConnectionTypeKey = attribute.Key("net.host.connection.type") - // This describes more details regarding the connection.type. It may be the type - // of cell technology connection, but it could be used for describing details - // about a wifi connection. - // - // Type: Enum - // Required: No - // Stability: stable - // Examples: 'LTE' - NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype") - // The name of the mobile carrier. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'sprint' - NetHostCarrierNameKey = attribute.Key("net.host.carrier.name") - // The mobile carrier country code. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '310' - NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc") - // The mobile carrier network code. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '001' - NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc") - // The ISO 3166-1 alpha-2 2-character country code associated with the mobile - // carrier network. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'DE' - NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc") -) - -var ( - // ip_tcp - NetTransportTCP = NetTransportKey.String("ip_tcp") - // ip_udp - NetTransportUDP = NetTransportKey.String("ip_udp") - // Another IP-based protocol - NetTransportIP = NetTransportKey.String("ip") - // Unix Domain socket. See below - NetTransportUnix = NetTransportKey.String("unix") - // Named or anonymous pipe. See note below - NetTransportPipe = NetTransportKey.String("pipe") - // In-process communication - NetTransportInProc = NetTransportKey.String("inproc") - // Something else (non IP-based) - NetTransportOther = NetTransportKey.String("other") -) - -var ( - // wifi - NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") - // wired - NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") - // cell - NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") - // unavailable - NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") - // unknown - NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") -) - -var ( - // GPRS - NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") - // EDGE - NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") - // UMTS - NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") - // CDMA - NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") - // EVDO Rel. 0 - NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") - // EVDO Rev. 
A - NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") - // CDMA2000 1XRTT - NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") - // HSDPA - NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") - // HSUPA - NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") - // HSPA - NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") - // IDEN - NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") - // EVDO Rev. B - NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") - // LTE - NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") - // EHRPD - NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") - // HSPAP - NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") - // GSM - NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") - // TD-SCDMA - NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") - // IWLAN - NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") - // 5G NR (New Radio) - NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") - // 5G NRNSA (New Radio Non-Standalone) - NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") - // LTE CA - NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") -) - -// Operations that access some remote service. -const ( - // The [`service.name`](../../resource/semantic_conventions/README.md#service) of - // the remote service. SHOULD be equal to the actual `service.name` resource - // attribute of the remote service if any. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'AuthTokenCache' - PeerServiceKey = attribute.Key("peer.service") -) - -// These attributes may be used for any operation with an authenticated and/or authorized enduser. -const ( - // Username or client_id extracted from the access token or - // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in the - // inbound request from outside the system. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'username' - EnduserIDKey = attribute.Key("enduser.id") - // Actual/assumed role the client is making the request under extracted from token - // or application security context. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'admin' - EnduserRoleKey = attribute.Key("enduser.role") - // Scopes or granted authorities the client currently possesses extracted from - // token or application security context. The value would come from the scope - // associated with an [OAuth 2.0 Access - // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute value - // in a [SAML 2.0 Assertion](http://docs.oasis- - // open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'read:message, write:files' - EnduserScopeKey = attribute.Key("enduser.scope") -) - -// These attributes may be used for any operation to store information about a thread that started a span. -const ( - // Current "managed" thread ID (as opposed to OS thread ID). - // - // Type: int - // Required: No - // Stability: stable - // Examples: 42 - ThreadIDKey = attribute.Key("thread.id") - // Current thread name. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: 'main' - ThreadNameKey = attribute.Key("thread.name") -) - -// These attributes allow to report this unit of code and therefore to provide more context about the span. -const ( - // The method or function name, or equivalent (usually rightmost part of the code - // unit's name). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'serveRequest' - CodeFunctionKey = attribute.Key("code.function") - // The "namespace" within which `code.function` is defined. Usually the qualified - // class or module name, such that `code.namespace` + some separator + - // `code.function` form a unique identifier for the code unit. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'com.example.MyHTTPService' - CodeNamespaceKey = attribute.Key("code.namespace") - // The source code file name that identifies the code unit as uniquely as possible - // (preferably an absolute file path). - // - // Type: string - // Required: No - // Stability: stable - // Examples: '/usr/local/MyApplication/content_root/app/index.php' - CodeFilepathKey = attribute.Key("code.filepath") - // The line number in `code.filepath` best representing the operation. It SHOULD - // point within the code unit named in `code.function`. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 42 - CodeLineNumberKey = attribute.Key("code.lineno") -) - -// This document defines semantic conventions for HTTP client and server Spans. -const ( - // HTTP request method. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'GET', 'POST', 'HEAD' - HTTPMethodKey = attribute.Key("http.method") - // Full HTTP request URL in the form `scheme://host[:port]/path?query[#fragment]`. - // Usually the fragment is not transmitted over HTTP, but if it is known, it - // should be included nevertheless. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' - // Note: `http.url` MUST NOT contain credentials passed via URL in form of - // `https://username:password@www.example.com/`. In such case the attribute's - // value should be `https://www.example.com/`. - HTTPURLKey = attribute.Key("http.url") - // The full request target as passed in a HTTP request line or equivalent. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '/path/12314/?q=ddds#123' - HTTPTargetKey = attribute.Key("http.target") - // The value of the [HTTP host - // header](https://tools.ietf.org/html/rfc7230#section-5.4). An empty Host header - // should also be reported, see note. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'www.example.org' - // Note: When the header is present but empty the attribute SHOULD be set to the - // empty string. Note that this is a valid situation that is expected in certain - // cases, according the aforementioned [section of RFC - // 7230](https://tools.ietf.org/html/rfc7230#section-5.4). When the header is not - // set the attribute MUST NOT be set. - HTTPHostKey = attribute.Key("http.host") - // The URI scheme identifying the used protocol. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'http', 'https' - HTTPSchemeKey = attribute.Key("http.scheme") - // [HTTP response status code](https://tools.ietf.org/html/rfc7231#section-6). - // - // Type: int - // Required: If and only if one was received/sent. 
- // Stability: stable - // Examples: 200 - HTTPStatusCodeKey = attribute.Key("http.status_code") - // Kind of HTTP protocol used. - // - // Type: Enum - // Required: No - // Stability: stable - // Note: If `net.transport` is not specified, it can be assumed to be `IP.TCP` - // except if `http.flavor` is `QUIC`, in which case `IP.UDP` is assumed. - HTTPFlavorKey = attribute.Key("http.flavor") - // Value of the [HTTP User- - // Agent](https://tools.ietf.org/html/rfc7231#section-5.5.3) header sent by the - // client. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' - HTTPUserAgentKey = attribute.Key("http.user_agent") - // The size of the request payload body in bytes. This is the number of bytes - // transferred excluding headers and is often, but not always, present as the - // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For - // requests using transport encoding, this should be the compressed size. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 3495 - HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") - // The size of the uncompressed request payload body after transport decoding. Not - // set if transport encoding not used. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 5493 - HTTPRequestContentLengthUncompressedKey = attribute.Key("http.request_content_length_uncompressed") - // The size of the response payload body in bytes. This is the number of bytes - // transferred excluding headers and is often, but not always, present as the - // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For - // requests using transport encoding, this should be the compressed size. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 3495 - HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") - // The size of the uncompressed response payload body after transport decoding. - // Not set if transport encoding not used. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 5493 - HTTPResponseContentLengthUncompressedKey = attribute.Key("http.response_content_length_uncompressed") - // The ordinal number of request re-sending attempt. - // - // Type: int - // Required: If and only if a request was retried. - // Stability: stable - // Examples: 3 - HTTPRetryCountKey = attribute.Key("http.retry_count") -) - -var ( - // HTTP/1.0 - HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") - // HTTP/1.1 - HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") - // HTTP/2 - HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") - // HTTP/3 - HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0") - // SPDY protocol - HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") - // QUIC protocol - HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") -) - -// Semantic Convention for HTTP Server -const ( - // The primary server name of the matched virtual host. This should be obtained - // via configuration. If no such configuration can be obtained, this attribute - // MUST NOT be set ( `net.host.name` should be used instead). - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'example.com' - // Note: `http.url` is usually not readily available on the server side but would - // have to be assembled in a cumbersome and sometimes lossy process from other - // information (see e.g. open-telemetry/opentelemetry-python/pull/148). 
It is thus - // preferred to supply the raw data that is available. - HTTPServerNameKey = attribute.Key("http.server_name") - // The matched route (path template). - // - // Type: string - // Required: No - // Stability: stable - // Examples: '/users/:userID?' - HTTPRouteKey = attribute.Key("http.route") - // The IP address of the original client behind all proxies, if known (e.g. from - // [X-Forwarded-For](https://developer.mozilla.org/en- - // US/docs/Web/HTTP/Headers/X-Forwarded-For)). - // - // Type: string - // Required: No - // Stability: stable - // Examples: '83.164.160.102' - // Note: This is not necessarily the same as `net.peer.ip`, which would - // identify the network-level peer, which may be a proxy. - - // This attribute should be set when a source of information different - // from the one used for `net.peer.ip`, is available even if that other - // source just confirms the same value as `net.peer.ip`. - // Rationale: For `net.peer.ip`, one typically does not know if it - // comes from a proxy, reverse proxy, or the actual client. Setting - // `http.client_ip` when it's the same as `net.peer.ip` means that - // one is at least somewhat confident that the address is not that of - // the closest proxy. - HTTPClientIPKey = attribute.Key("http.client_ip") -) - -// Attributes that exist for multiple DynamoDB request types. -const ( - // The keys in the `RequestItems` object field. - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: 'Users', 'Cats' - AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") - // The JSON-serialized value of each item in the `ConsumedCapacity` response - // field. - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : { - // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": - // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, - // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, - // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, - // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": - // "string", "WriteCapacityUnits": number }' - AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") - // The JSON-serialized value of the `ItemCollectionMetrics` response field. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, - // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : - // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": - // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }' - AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") - // The value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. - // - // Type: double - // Required: No - // Stability: stable - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") - // The value of the `ProvisionedThroughput.WriteCapacityUnits` request parameter. - // - // Type: double - // Required: No - // Stability: stable - // Examples: 1.0, 2.0 - AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") - // The value of the `ConsistentRead` request parameter. 
- // - // Type: boolean - // Required: No - // Stability: stable - AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") - // The value of the `ProjectionExpression` request parameter. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems, - // ProductReviews' - AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") - // The value of the `Limit` request parameter. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 10 - AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") - // The value of the `AttributesToGet` request parameter. - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: 'lives', 'id' - AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") - // The value of the `IndexName` request parameter. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'name_to_group' - AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") - // The value of the `Select` request parameter. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'ALL_ATTRIBUTES', 'COUNT' - AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") -) - -// DynamoDB.CreateTable -const ( - // The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request - // field - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string", - // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], - // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits": - // number, "WriteCapacityUnits": number } }' - AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") - // The JSON-serialized value of each item of the `LocalSecondaryIndexes` request - // field. - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes": - // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", - // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], - // "ProjectionType": "string" } }' - AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") -) - -// DynamoDB.ListTables -const ( - // The value of the `ExclusiveStartTableName` request parameter. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Users', 'CatsTable' - AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") - // The the number of items in the `TableNames` response parameter. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 20 - AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") -) - -// DynamoDB.Query -const ( - // The value of the `ScanIndexForward` request parameter. - // - // Type: boolean - // Required: No - // Stability: stable - AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") -) - -// DynamoDB.Scan -const ( - // The value of the `Segment` request parameter. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 10 - AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") - // The value of the `TotalSegments` request parameter. 
- // - // Type: int - // Required: No - // Stability: stable - // Examples: 100 - AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") - // The value of the `Count` response parameter. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 10 - AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") - // The value of the `ScannedCount` response parameter. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 50 - AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") -) - -// DynamoDB.UpdateTable -const ( - // The JSON-serialized value of each item in the `AttributeDefinitions` request - // field. - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' - AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") - // The JSON-serialized value of each item in the the `GlobalSecondaryIndexUpdates` - // request field. - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { - // "AttributeName": "string", "KeyType": "string" } ], "Projection": { - // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, - // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": - // number } }' - AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") -) - -// This document defines the attributes used in messaging systems. -const ( - // A string identifying the messaging system. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' - MessagingSystemKey = attribute.Key("messaging.system") - // The message destination name. This might be equal to the span name but is - // required nevertheless. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'MyQueue', 'MyTopic' - MessagingDestinationKey = attribute.Key("messaging.destination") - // The kind of message destination - // - // Type: Enum - // Required: Required only if the message destination is either a `queue` or - // `topic`. - // Stability: stable - MessagingDestinationKindKey = attribute.Key("messaging.destination_kind") - // A boolean that is true if the message destination is temporary. - // - // Type: boolean - // Required: If missing, it is assumed to be false. - // Stability: stable - MessagingTempDestinationKey = attribute.Key("messaging.temp_destination") - // The name of the transport protocol. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'AMQP', 'MQTT' - MessagingProtocolKey = attribute.Key("messaging.protocol") - // The version of the transport protocol. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '0.9.1' - MessagingProtocolVersionKey = attribute.Key("messaging.protocol_version") - // Connection string. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'tibjmsnaming://localhost:7222', - // 'https://queue.amazonaws.com/80398EXAMPLE/MyQueue' - MessagingURLKey = attribute.Key("messaging.url") - // A value used by the messaging system as an identifier for the message, - // represented as a string. 
- // - // Type: string - // Required: No - // Stability: stable - // Examples: '452a7c7c7c7048c2f887f61572b18fc2' - MessagingMessageIDKey = attribute.Key("messaging.message_id") - // The [conversation ID](#conversations) identifying the conversation to which the - // message belongs, represented as a string. Sometimes called "Correlation ID". - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'MyConversationID' - MessagingConversationIDKey = attribute.Key("messaging.conversation_id") - // The (uncompressed) size of the message payload in bytes. Also use this - // attribute if it is unknown whether the compressed or uncompressed payload size - // is reported. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 2738 - MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message_payload_size_bytes") - // The compressed size of the message payload in bytes. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 2048 - MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message_payload_compressed_size_bytes") -) - -var ( - // A message sent to a queue - MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue") - // A message sent to a topic - MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic") -) - -// Semantic convention for a consumer of messages received from a messaging system -const ( - // A string identifying the kind of message consumption as defined in the - // [Operation names](#operation-names) section above. If the operation is "send", - // this attribute MUST NOT be set, since the operation can be inferred from the - // span kind in that case. - // - // Type: Enum - // Required: No - // Stability: stable - MessagingOperationKey = attribute.Key("messaging.operation") - // The identifier for the consumer receiving a message. For Kafka, set it to - // `{messaging.kafka.consumer_group} - {messaging.kafka.client_id}`, if both are - // present, or only `messaging.kafka.consumer_group`. For brokers, such as - // RabbitMQ and Artemis, set it to the `client_id` of the client consuming the - // message. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'mygroup - client-6' - MessagingConsumerIDKey = attribute.Key("messaging.consumer_id") -) - -var ( - // receive - MessagingOperationReceive = MessagingOperationKey.String("receive") - // process - MessagingOperationProcess = MessagingOperationKey.String("process") -) - -// Attributes for RabbitMQ -const ( - // RabbitMQ message routing key. - // - // Type: string - // Required: Unless it is empty. - // Stability: stable - // Examples: 'myKey' - MessagingRabbitmqRoutingKeyKey = attribute.Key("messaging.rabbitmq.routing_key") -) - -// Attributes for Apache Kafka -const ( - // Message keys in Kafka are used for grouping alike messages to ensure they're - // processed on the same partition. They differ from `messaging.message_id` in - // that they're not unique. If the key is `null`, the attribute MUST NOT be set. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'myKey' - // Note: If the key type is not string, it's string representation has to be - // supplied for the attribute. If the key has no unambiguous, canonical string - // form, don't include its value. - MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message_key") - // Name of the Kafka Consumer Group that is handling the message. 
Only applies to - // consumers, not producers. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'my-group' - MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer_group") - // Client ID for the Consumer or Producer that is handling the message. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'client-5' - MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") - // Partition the message is sent to. - // - // Type: int - // Required: No - // Stability: stable - // Examples: 2 - MessagingKafkaPartitionKey = attribute.Key("messaging.kafka.partition") - // A boolean that is true if the message is a tombstone. - // - // Type: boolean - // Required: If missing, it is assumed to be false. - // Stability: stable - MessagingKafkaTombstoneKey = attribute.Key("messaging.kafka.tombstone") -) - -// Attributes for Apache RocketMQ -const ( - // Namespace of RocketMQ resources, resources in different namespaces are - // individual. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'myNamespace' - MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") - // Name of the RocketMQ producer/consumer group that is handling the message. The - // client type is identified by the SpanKind. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'myConsumerGroup' - MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") - // The unique identifier for each client. - // - // Type: string - // Required: Always - // Stability: stable - // Examples: 'myhost@8742@s8083jm' - MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id") - // Type of message. - // - // Type: Enum - // Required: No - // Stability: stable - MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message_type") - // The secondary classifier of message besides topic. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'tagA' - MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message_tag") - // Key(s) of message, another way to mark message besides message id. - // - // Type: string[] - // Required: No - // Stability: stable - // Examples: 'keyA', 'keyB' - MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message_keys") - // Model of message consumption. This only applies to consumer spans. - // - // Type: Enum - // Required: No - // Stability: stable - MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") -) - -var ( - // Normal message - MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") - // FIFO message - MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") - // Delay message - MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") - // Transaction message - MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") -) - -var ( - // Clustering consumption model - MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") - // Broadcasting consumption model - MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") -) - -// This document defines semantic conventions for remote procedure calls. -const ( - // A string identifying the remoting system. 
See below for a list of well-known - // identifiers. - // - // Type: Enum - // Required: Always - // Stability: stable - RPCSystemKey = attribute.Key("rpc.system") - // The full (logical) name of the service being called, including its package - // name, if applicable. - // - // Type: string - // Required: No, but recommended - // Stability: stable - // Examples: 'myservice.EchoService' - // Note: This is the logical name of the service from the RPC interface - // perspective, which can be different from the name of any implementing class. - // The `code.namespace` attribute may be used to store the latter (despite the - // attribute name, it may include a class name; e.g., class with method actually - // executing the call on the server side, RPC client stub class on the client - // side). - RPCServiceKey = attribute.Key("rpc.service") - // The name of the (logical) method being called, must be equal to the $method - // part in the span name. - // - // Type: string - // Required: No, but recommended - // Stability: stable - // Examples: 'exampleMethod' - // Note: This is the logical name of the method from the RPC interface - // perspective, which can be different from the name of any implementing - // method/function. The `code.function` attribute may be used to store the latter - // (e.g., method actually executing the call on the server side, RPC client stub - // method on the client side). - RPCMethodKey = attribute.Key("rpc.method") -) - -var ( - // gRPC - RPCSystemGRPC = RPCSystemKey.String("grpc") - // Java RMI - RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") - // .NET WCF - RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") - // Apache Dubbo - RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") -) - -// Tech-specific attributes for gRPC. -const ( - // The [numeric status - // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of the gRPC - // request. - // - // Type: Enum - // Required: Always - // Stability: stable - RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") -) - -var ( - // OK - RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) - // CANCELLED - RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) - // UNKNOWN - RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) - // INVALID_ARGUMENT - RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) - // DEADLINE_EXCEEDED - RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) - // NOT_FOUND - RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) - // ALREADY_EXISTS - RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) - // PERMISSION_DENIED - RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) - // RESOURCE_EXHAUSTED - RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) - // FAILED_PRECONDITION - RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) - // ABORTED - RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) - // OUT_OF_RANGE - RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) - // UNIMPLEMENTED - RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) - // INTERNAL - RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) - // UNAVAILABLE - RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) - // DATA_LOSS - RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) - // UNAUTHENTICATED - RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) -) - -// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). 
-const ( - // Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC - // 1.0 does not specify this, the value can be omitted. - // - // Type: string - // Required: If missing, it is assumed to be "1.0". - // Stability: stable - // Examples: '2.0', '1.0' - RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") - // `id` property of request or response. Since protocol allows id to be int, - // string, `null` or missing (for notifications), value is expected to be cast to - // string for simplicity. Use empty string in case of `null` value. Omit entirely - // if this is a notification. - // - // Type: string - // Required: No - // Stability: stable - // Examples: '10', 'request-7', '' - RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") - // `error.code` property of response if it is an error response. - // - // Type: int - // Required: If missing, response is assumed to be successful. - // Stability: stable - // Examples: -32700, 100 - RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") - // `error.message` property of response if it is an error response. - // - // Type: string - // Required: No - // Stability: stable - // Examples: 'Parse error', 'User already exists' - RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") -) - -// RPC received/sent message. -const ( - // Whether this is a received or sent message. - // - // Type: Enum - // Required: No - // Stability: stable - MessageTypeKey = attribute.Key("message.type") - // MUST be calculated as two different counters starting from `1` one for sent - // messages and one for received message. - // - // Type: int - // Required: No - // Stability: stable - // Note: This way we guarantee that the values will be consistent between - // different implementations. - MessageIDKey = attribute.Key("message.id") - // Compressed size of the message in bytes. - // - // Type: int - // Required: No - // Stability: stable - MessageCompressedSizeKey = attribute.Key("message.compressed_size") - // Uncompressed size of the message in bytes. - // - // Type: int - // Required: No - // Stability: stable - MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") -) - -var ( - // sent - MessageTypeSent = MessageTypeKey.String("SENT") - // received - MessageTypeReceived = MessageTypeKey.String("RECEIVED") -) diff --git a/vendor/go.opentelemetry.io/otel/trace.go b/vendor/go.opentelemetry.io/otel/trace.go deleted file mode 100644 index caf7249de..000000000 --- a/vendor/go.opentelemetry.io/otel/trace.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package otel // import "go.opentelemetry.io/otel" - -import ( - "go.opentelemetry.io/otel/internal/global" - "go.opentelemetry.io/otel/trace" -) - -// Tracer creates a named tracer that implements Tracer interface. -// If the name is an empty string then provider uses default name. -// -// This is short for GetTracerProvider().Tracer(name, opts...) 
-func Tracer(name string, opts ...trace.TracerOption) trace.Tracer { - return GetTracerProvider().Tracer(name, opts...) -} - -// GetTracerProvider returns the registered global trace provider. -// If none is registered then an instance of NoopTracerProvider is returned. -// -// Use the trace provider to create a named tracer. E.g. -// -// tracer := otel.GetTracerProvider().Tracer("example.com/foo") -// -// or -// -// tracer := otel.Tracer("example.com/foo") -func GetTracerProvider() trace.TracerProvider { - return global.TracerProvider() -} - -// SetTracerProvider registers `tp` as the global trace provider. -func SetTracerProvider(tp trace.TracerProvider) { - global.SetTracerProvider(tp) -} diff --git a/vendor/go.opentelemetry.io/otel/trace/LICENSE b/vendor/go.opentelemetry.io/otel/trace/LICENSE deleted file mode 100644 index 261eeb9e9..000000000 --- a/vendor/go.opentelemetry.io/otel/trace/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go deleted file mode 100644 index cb3efbb9a..000000000 --- a/vendor/go.opentelemetry.io/otel/trace/config.go +++ /dev/null @@ -1,333 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace // import "go.opentelemetry.io/otel/trace" - -import ( - "time" - - "go.opentelemetry.io/otel/attribute" -) - -// TracerConfig is a group of options for a Tracer. -type TracerConfig struct { - instrumentationVersion string - // Schema URL of the telemetry emitted by the Tracer. - schemaURL string - attrs attribute.Set -} - -// InstrumentationVersion returns the version of the library providing instrumentation. -func (t *TracerConfig) InstrumentationVersion() string { - return t.instrumentationVersion -} - -// InstrumentationAttributes returns the attributes associated with the library -// providing instrumentation. -func (t *TracerConfig) InstrumentationAttributes() attribute.Set { - return t.attrs -} - -// SchemaURL returns the Schema URL of the telemetry emitted by the Tracer. -func (t *TracerConfig) SchemaURL() string { - return t.schemaURL -} - -// NewTracerConfig applies all the options to a returned TracerConfig. -func NewTracerConfig(options ...TracerOption) TracerConfig { - var config TracerConfig - for _, option := range options { - config = option.apply(config) - } - return config -} - -// TracerOption applies an option to a TracerConfig. -type TracerOption interface { - apply(TracerConfig) TracerConfig -} - -type tracerOptionFunc func(TracerConfig) TracerConfig - -func (fn tracerOptionFunc) apply(cfg TracerConfig) TracerConfig { - return fn(cfg) -} - -// SpanConfig is a group of options for a Span. 
-type SpanConfig struct { - attributes []attribute.KeyValue - timestamp time.Time - links []Link - newRoot bool - spanKind SpanKind - stackTrace bool -} - -// Attributes describe the associated qualities of a Span. -func (cfg *SpanConfig) Attributes() []attribute.KeyValue { - return cfg.attributes -} - -// Timestamp is a time in a Span life-cycle. -func (cfg *SpanConfig) Timestamp() time.Time { - return cfg.timestamp -} - -// StackTrace checks whether stack trace capturing is enabled. -func (cfg *SpanConfig) StackTrace() bool { - return cfg.stackTrace -} - -// Links are the associations a Span has with other Spans. -func (cfg *SpanConfig) Links() []Link { - return cfg.links -} - -// NewRoot identifies a Span as the root Span for a new trace. This is -// commonly used when an existing trace crosses trust boundaries and the -// remote parent span context should be ignored for security. -func (cfg *SpanConfig) NewRoot() bool { - return cfg.newRoot -} - -// SpanKind is the role a Span has in a trace. -func (cfg *SpanConfig) SpanKind() SpanKind { - return cfg.spanKind -} - -// NewSpanStartConfig applies all the options to a returned SpanConfig. -// No validation is performed on the returned SpanConfig (e.g. no uniqueness -// checking or bounding of data), it is left to the SDK to perform this -// action. -func NewSpanStartConfig(options ...SpanStartOption) SpanConfig { - var c SpanConfig - for _, option := range options { - c = option.applySpanStart(c) - } - return c -} - -// NewSpanEndConfig applies all the options to a returned SpanConfig. -// No validation is performed on the returned SpanConfig (e.g. no uniqueness -// checking or bounding of data), it is left to the SDK to perform this -// action. -func NewSpanEndConfig(options ...SpanEndOption) SpanConfig { - var c SpanConfig - for _, option := range options { - c = option.applySpanEnd(c) - } - return c -} - -// SpanStartOption applies an option to a SpanConfig. These options are applicable -// only when the span is created. -type SpanStartOption interface { - applySpanStart(SpanConfig) SpanConfig -} - -type spanOptionFunc func(SpanConfig) SpanConfig - -func (fn spanOptionFunc) applySpanStart(cfg SpanConfig) SpanConfig { - return fn(cfg) -} - -// SpanEndOption applies an option to a SpanConfig. These options are -// applicable only when the span is ended. -type SpanEndOption interface { - applySpanEnd(SpanConfig) SpanConfig -} - -// EventConfig is a group of options for an Event. -type EventConfig struct { - attributes []attribute.KeyValue - timestamp time.Time - stackTrace bool -} - -// Attributes describe the associated qualities of an Event. -func (cfg *EventConfig) Attributes() []attribute.KeyValue { - return cfg.attributes -} - -// Timestamp is a time in an Event life-cycle. -func (cfg *EventConfig) Timestamp() time.Time { - return cfg.timestamp -} - -// StackTrace checks whether stack trace capturing is enabled. -func (cfg *EventConfig) StackTrace() bool { - return cfg.stackTrace -} - -// NewEventConfig applies all the EventOptions to a returned EventConfig. If no -// timestamp option is passed, the returned EventConfig will have a Timestamp -// set to the call time, otherwise no validation is performed on the returned -// EventConfig. -func NewEventConfig(options ...EventOption) EventConfig { - var c EventConfig - for _, option := range options { - c = option.applyEvent(c) - } - if c.timestamp.IsZero() { - c.timestamp = time.Now() - } - return c -} - -// EventOption applies span event options to an EventConfig. 
-type EventOption interface { - applyEvent(EventConfig) EventConfig -} - -// SpanOption are options that can be used at both the beginning and end of a span. -type SpanOption interface { - SpanStartOption - SpanEndOption -} - -// SpanStartEventOption are options that can be used at the start of a span, or with an event. -type SpanStartEventOption interface { - SpanStartOption - EventOption -} - -// SpanEndEventOption are options that can be used at the end of a span, or with an event. -type SpanEndEventOption interface { - SpanEndOption - EventOption -} - -type attributeOption []attribute.KeyValue - -func (o attributeOption) applySpan(c SpanConfig) SpanConfig { - c.attributes = append(c.attributes, []attribute.KeyValue(o)...) - return c -} -func (o attributeOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) } -func (o attributeOption) applyEvent(c EventConfig) EventConfig { - c.attributes = append(c.attributes, []attribute.KeyValue(o)...) - return c -} - -var _ SpanStartEventOption = attributeOption{} - -// WithAttributes adds the attributes related to a span life-cycle event. -// These attributes are used to describe the work a Span represents when this -// option is provided to a Span's start or end events. Otherwise, these -// attributes provide additional information about the event being recorded -// (e.g. error, state change, processing progress, system event). -// -// If multiple of these options are passed the attributes of each successive -// option will extend the attributes instead of overwriting. There is no -// guarantee of uniqueness in the resulting attributes. -func WithAttributes(attributes ...attribute.KeyValue) SpanStartEventOption { - return attributeOption(attributes) -} - -// SpanEventOption are options that can be used with an event or a span. -type SpanEventOption interface { - SpanOption - EventOption -} - -type timestampOption time.Time - -func (o timestampOption) applySpan(c SpanConfig) SpanConfig { - c.timestamp = time.Time(o) - return c -} -func (o timestampOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) } -func (o timestampOption) applySpanEnd(c SpanConfig) SpanConfig { return o.applySpan(c) } -func (o timestampOption) applyEvent(c EventConfig) EventConfig { - c.timestamp = time.Time(o) - return c -} - -var _ SpanEventOption = timestampOption{} - -// WithTimestamp sets the time of a Span or Event life-cycle moment (e.g. -// started, stopped, errored). -func WithTimestamp(t time.Time) SpanEventOption { - return timestampOption(t) -} - -type stackTraceOption bool - -func (o stackTraceOption) applyEvent(c EventConfig) EventConfig { - c.stackTrace = bool(o) - return c -} -func (o stackTraceOption) applySpan(c SpanConfig) SpanConfig { - c.stackTrace = bool(o) - return c -} -func (o stackTraceOption) applySpanEnd(c SpanConfig) SpanConfig { return o.applySpan(c) } - -// WithStackTrace sets the flag to capture the error with stack trace (e.g. true, false). -func WithStackTrace(b bool) SpanEndEventOption { - return stackTraceOption(b) -} - -// WithLinks adds links to a Span. The links are added to the existing Span -// links, i.e. this does not overwrite. Links with invalid span context are ignored. -func WithLinks(links ...Link) SpanStartOption { - return spanOptionFunc(func(cfg SpanConfig) SpanConfig { - cfg.links = append(cfg.links, links...) - return cfg - }) -} - -// WithNewRoot specifies that the Span should be treated as a root Span. 
Any -// existing parent span context will be ignored when defining the Span's trace -// identifiers. -func WithNewRoot() SpanStartOption { - return spanOptionFunc(func(cfg SpanConfig) SpanConfig { - cfg.newRoot = true - return cfg - }) -} - -// WithSpanKind sets the SpanKind of a Span. -func WithSpanKind(kind SpanKind) SpanStartOption { - return spanOptionFunc(func(cfg SpanConfig) SpanConfig { - cfg.spanKind = kind - return cfg - }) -} - -// WithInstrumentationVersion sets the instrumentation version. -func WithInstrumentationVersion(version string) TracerOption { - return tracerOptionFunc(func(cfg TracerConfig) TracerConfig { - cfg.instrumentationVersion = version - return cfg - }) -} - -// WithInstrumentationAttributes sets the instrumentation attributes. -// -// The passed attributes will be de-duplicated. -func WithInstrumentationAttributes(attr ...attribute.KeyValue) TracerOption { - return tracerOptionFunc(func(config TracerConfig) TracerConfig { - config.attrs = attribute.NewSet(attr...) - return config - }) -} - -// WithSchemaURL sets the schema URL for the Tracer. -func WithSchemaURL(schemaURL string) TracerOption { - return tracerOptionFunc(func(cfg TracerConfig) TracerConfig { - cfg.schemaURL = schemaURL - return cfg - }) -} diff --git a/vendor/go.opentelemetry.io/otel/trace/context.go b/vendor/go.opentelemetry.io/otel/trace/context.go deleted file mode 100644 index 76f9a083c..000000000 --- a/vendor/go.opentelemetry.io/otel/trace/context.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace // import "go.opentelemetry.io/otel/trace" - -import "context" - -type traceContextKeyType int - -const currentSpanKey traceContextKeyType = iota - -// ContextWithSpan returns a copy of parent with span set as the current Span. -func ContextWithSpan(parent context.Context, span Span) context.Context { - return context.WithValue(parent, currentSpanKey, span) -} - -// ContextWithSpanContext returns a copy of parent with sc as the current -// Span. The Span implementation that wraps sc is non-recording and performs -// no operations other than to return sc as the SpanContext from the -// SpanContext method. -func ContextWithSpanContext(parent context.Context, sc SpanContext) context.Context { - return ContextWithSpan(parent, nonRecordingSpan{sc: sc}) -} - -// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicly -// as a remote SpanContext and as the current Span. The Span implementation -// that wraps rsc is non-recording and performs no operations other than to -// return rsc as the SpanContext from the SpanContext method. -func ContextWithRemoteSpanContext(parent context.Context, rsc SpanContext) context.Context { - return ContextWithSpanContext(parent, rsc.WithRemote(true)) -} - -// SpanFromContext returns the current Span from ctx. -// -// If no Span is currently set in ctx an implementation of a Span that -// performs no operations is returned. 
-func SpanFromContext(ctx context.Context) Span { - if ctx == nil { - return noopSpan{} - } - if span, ok := ctx.Value(currentSpanKey).(Span); ok { - return span - } - return noopSpan{} -} - -// SpanContextFromContext returns the current Span's SpanContext. -func SpanContextFromContext(ctx context.Context) SpanContext { - return SpanFromContext(ctx).SpanContext() -} diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go deleted file mode 100644 index ab0346f96..000000000 --- a/vendor/go.opentelemetry.io/otel/trace/doc.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package trace provides an implementation of the tracing part of the -OpenTelemetry API. - -To participate in distributed traces a Span needs to be created for the -operation being performed as part of a traced workflow. In its simplest form: - - var tracer trace.Tracer - - func init() { - tracer = otel.Tracer("instrumentation/package/name") - } - - func operation(ctx context.Context) { - var span trace.Span - ctx, span = tracer.Start(ctx, "operation") - defer span.End() - // ... - } - -A Tracer is unique to the instrumentation and is used to create Spans. -Instrumentation should be designed to accept a TracerProvider from which it -can create its own unique Tracer. Alternatively, the registered global -TracerProvider from the go.opentelemetry.io/otel package can be used as -a default. - - const ( - name = "instrumentation/package/name" - version = "0.1.0" - ) - - type Instrumentation struct { - tracer trace.Tracer - } - - func NewInstrumentation(tp trace.TracerProvider) *Instrumentation { - if tp == nil { - tp = otel.TracerProvider() - } - return &Instrumentation{ - tracer: tp.Tracer(name, trace.WithInstrumentationVersion(version)), - } - } - - func operation(ctx context.Context, inst *Instrumentation) { - var span trace.Span - ctx, span = inst.tracer.Start(ctx, "operation") - defer span.End() - // ... - } -*/ -package trace // import "go.opentelemetry.io/otel/trace" diff --git a/vendor/go.opentelemetry.io/otel/trace/nonrecording.go b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go deleted file mode 100644 index 88fcb8161..000000000 --- a/vendor/go.opentelemetry.io/otel/trace/nonrecording.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package trace // import "go.opentelemetry.io/otel/trace" - -// nonRecordingSpan is a minimal implementation of a Span that wraps a -// SpanContext. It performs no operations other than to return the wrapped -// SpanContext. -type nonRecordingSpan struct { - noopSpan - - sc SpanContext -} - -// SpanContext returns the wrapped SpanContext. -func (s nonRecordingSpan) SpanContext() SpanContext { return s.sc } diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go deleted file mode 100644 index 73950f207..000000000 --- a/vendor/go.opentelemetry.io/otel/trace/noop.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace // import "go.opentelemetry.io/otel/trace" - -import ( - "context" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" -) - -// NewNoopTracerProvider returns an implementation of TracerProvider that -// performs no operations. The Tracer and Spans created from the returned -// TracerProvider also perform no operations. -func NewNoopTracerProvider() TracerProvider { - return noopTracerProvider{} -} - -type noopTracerProvider struct{} - -var _ TracerProvider = noopTracerProvider{} - -// Tracer returns noop implementation of Tracer. -func (p noopTracerProvider) Tracer(string, ...TracerOption) Tracer { - return noopTracer{} -} - -// noopTracer is an implementation of Tracer that preforms no operations. -type noopTracer struct{} - -var _ Tracer = noopTracer{} - -// Start carries forward a non-recording Span, if one is present in the context, otherwise it -// creates a no-op Span. -func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption) (context.Context, Span) { - span := SpanFromContext(ctx) - if _, ok := span.(nonRecordingSpan); !ok { - // span is likely already a noopSpan, but let's be sure - span = noopSpan{} - } - return ContextWithSpan(ctx, span), span -} - -// noopSpan is an implementation of Span that preforms no operations. -type noopSpan struct{} - -var _ Span = noopSpan{} - -// SpanContext returns an empty span context. -func (noopSpan) SpanContext() SpanContext { return SpanContext{} } - -// IsRecording always returns false. -func (noopSpan) IsRecording() bool { return false } - -// SetStatus does nothing. -func (noopSpan) SetStatus(codes.Code, string) {} - -// SetError does nothing. -func (noopSpan) SetError(bool) {} - -// SetAttributes does nothing. -func (noopSpan) SetAttributes(...attribute.KeyValue) {} - -// End does nothing. -func (noopSpan) End(...SpanEndOption) {} - -// RecordError does nothing. -func (noopSpan) RecordError(error, ...EventOption) {} - -// AddEvent does nothing. -func (noopSpan) AddEvent(string, ...EventOption) {} - -// SetName does nothing. -func (noopSpan) SetName(string) {} - -// TracerProvider returns a no-op TracerProvider. 
-func (noopSpan) TracerProvider() TracerProvider { return noopTracerProvider{} } diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go deleted file mode 100644 index 4aa94f79f..000000000 --- a/vendor/go.opentelemetry.io/otel/trace/trace.go +++ /dev/null @@ -1,551 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace // import "go.opentelemetry.io/otel/trace" - -import ( - "bytes" - "context" - "encoding/hex" - "encoding/json" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/codes" -) - -const ( - // FlagsSampled is a bitmask with the sampled bit set. A SpanContext - // with the sampling bit set means the span is sampled. - FlagsSampled = TraceFlags(0x01) - - errInvalidHexID errorConst = "trace-id and span-id can only contain [0-9a-f] characters, all lowercase" - - errInvalidTraceIDLength errorConst = "hex encoded trace-id must have length equals to 32" - errNilTraceID errorConst = "trace-id can't be all zero" - - errInvalidSpanIDLength errorConst = "hex encoded span-id must have length equals to 16" - errNilSpanID errorConst = "span-id can't be all zero" -) - -type errorConst string - -func (e errorConst) Error() string { - return string(e) -} - -// TraceID is a unique identity of a trace. -// nolint:revive // revive complains about stutter of `trace.TraceID`. -type TraceID [16]byte - -var nilTraceID TraceID -var _ json.Marshaler = nilTraceID - -// IsValid checks whether the trace TraceID is valid. A valid trace ID does -// not consist of zeros only. -func (t TraceID) IsValid() bool { - return !bytes.Equal(t[:], nilTraceID[:]) -} - -// MarshalJSON implements a custom marshal function to encode TraceID -// as a hex string. -func (t TraceID) MarshalJSON() ([]byte, error) { - return json.Marshal(t.String()) -} - -// String returns the hex string representation form of a TraceID. -func (t TraceID) String() string { - return hex.EncodeToString(t[:]) -} - -// SpanID is a unique identity of a span in a trace. -type SpanID [8]byte - -var nilSpanID SpanID -var _ json.Marshaler = nilSpanID - -// IsValid checks whether the SpanID is valid. A valid SpanID does not consist -// of zeros only. -func (s SpanID) IsValid() bool { - return !bytes.Equal(s[:], nilSpanID[:]) -} - -// MarshalJSON implements a custom marshal function to encode SpanID -// as a hex string. -func (s SpanID) MarshalJSON() ([]byte, error) { - return json.Marshal(s.String()) -} - -// String returns the hex string representation form of a SpanID. -func (s SpanID) String() string { - return hex.EncodeToString(s[:]) -} - -// TraceIDFromHex returns a TraceID from a hex string if it is compliant with -// the W3C trace-context specification. See more at -// https://www.w3.org/TR/trace-context/#trace-id -// nolint:revive // revive complains about stutter of `trace.TraceIDFromHex`. 
-func TraceIDFromHex(h string) (TraceID, error) { - t := TraceID{} - if len(h) != 32 { - return t, errInvalidTraceIDLength - } - - if err := decodeHex(h, t[:]); err != nil { - return t, err - } - - if !t.IsValid() { - return t, errNilTraceID - } - return t, nil -} - -// SpanIDFromHex returns a SpanID from a hex string if it is compliant -// with the w3c trace-context specification. -// See more at https://www.w3.org/TR/trace-context/#parent-id -func SpanIDFromHex(h string) (SpanID, error) { - s := SpanID{} - if len(h) != 16 { - return s, errInvalidSpanIDLength - } - - if err := decodeHex(h, s[:]); err != nil { - return s, err - } - - if !s.IsValid() { - return s, errNilSpanID - } - return s, nil -} - -func decodeHex(h string, b []byte) error { - for _, r := range h { - switch { - case 'a' <= r && r <= 'f': - continue - case '0' <= r && r <= '9': - continue - default: - return errInvalidHexID - } - } - - decoded, err := hex.DecodeString(h) - if err != nil { - return err - } - - copy(b, decoded) - return nil -} - -// TraceFlags contains flags that can be set on a SpanContext. -type TraceFlags byte //nolint:revive // revive complains about stutter of `trace.TraceFlags`. - -// IsSampled returns if the sampling bit is set in the TraceFlags. -func (tf TraceFlags) IsSampled() bool { - return tf&FlagsSampled == FlagsSampled -} - -// WithSampled sets the sampling bit in a new copy of the TraceFlags. -func (tf TraceFlags) WithSampled(sampled bool) TraceFlags { // nolint:revive // sampled is not a control flag. - if sampled { - return tf | FlagsSampled - } - - return tf &^ FlagsSampled -} - -// MarshalJSON implements a custom marshal function to encode TraceFlags -// as a hex string. -func (tf TraceFlags) MarshalJSON() ([]byte, error) { - return json.Marshal(tf.String()) -} - -// String returns the hex string representation form of TraceFlags. -func (tf TraceFlags) String() string { - return hex.EncodeToString([]byte{byte(tf)}[:]) -} - -// SpanContextConfig contains mutable fields usable for constructing -// an immutable SpanContext. -type SpanContextConfig struct { - TraceID TraceID - SpanID SpanID - TraceFlags TraceFlags - TraceState TraceState - Remote bool -} - -// NewSpanContext constructs a SpanContext using values from the provided -// SpanContextConfig. -func NewSpanContext(config SpanContextConfig) SpanContext { - return SpanContext{ - traceID: config.TraceID, - spanID: config.SpanID, - traceFlags: config.TraceFlags, - traceState: config.TraceState, - remote: config.Remote, - } -} - -// SpanContext contains identifying trace information about a Span. -type SpanContext struct { - traceID TraceID - spanID SpanID - traceFlags TraceFlags - traceState TraceState - remote bool -} - -var _ json.Marshaler = SpanContext{} - -// IsValid returns if the SpanContext is valid. A valid span context has a -// valid TraceID and SpanID. -func (sc SpanContext) IsValid() bool { - return sc.HasTraceID() && sc.HasSpanID() -} - -// IsRemote indicates whether the SpanContext represents a remotely-created Span. -func (sc SpanContext) IsRemote() bool { - return sc.remote -} - -// WithRemote returns a copy of sc with the Remote property set to remote. -func (sc SpanContext) WithRemote(remote bool) SpanContext { - return SpanContext{ - traceID: sc.traceID, - spanID: sc.spanID, - traceFlags: sc.traceFlags, - traceState: sc.traceState, - remote: remote, - } -} - -// TraceID returns the TraceID from the SpanContext. 
-func (sc SpanContext) TraceID() TraceID { - return sc.traceID -} - -// HasTraceID checks if the SpanContext has a valid TraceID. -func (sc SpanContext) HasTraceID() bool { - return sc.traceID.IsValid() -} - -// WithTraceID returns a new SpanContext with the TraceID replaced. -func (sc SpanContext) WithTraceID(traceID TraceID) SpanContext { - return SpanContext{ - traceID: traceID, - spanID: sc.spanID, - traceFlags: sc.traceFlags, - traceState: sc.traceState, - remote: sc.remote, - } -} - -// SpanID returns the SpanID from the SpanContext. -func (sc SpanContext) SpanID() SpanID { - return sc.spanID -} - -// HasSpanID checks if the SpanContext has a valid SpanID. -func (sc SpanContext) HasSpanID() bool { - return sc.spanID.IsValid() -} - -// WithSpanID returns a new SpanContext with the SpanID replaced. -func (sc SpanContext) WithSpanID(spanID SpanID) SpanContext { - return SpanContext{ - traceID: sc.traceID, - spanID: spanID, - traceFlags: sc.traceFlags, - traceState: sc.traceState, - remote: sc.remote, - } -} - -// TraceFlags returns the flags from the SpanContext. -func (sc SpanContext) TraceFlags() TraceFlags { - return sc.traceFlags -} - -// IsSampled returns if the sampling bit is set in the SpanContext's TraceFlags. -func (sc SpanContext) IsSampled() bool { - return sc.traceFlags.IsSampled() -} - -// WithTraceFlags returns a new SpanContext with the TraceFlags replaced. -func (sc SpanContext) WithTraceFlags(flags TraceFlags) SpanContext { - return SpanContext{ - traceID: sc.traceID, - spanID: sc.spanID, - traceFlags: flags, - traceState: sc.traceState, - remote: sc.remote, - } -} - -// TraceState returns the TraceState from the SpanContext. -func (sc SpanContext) TraceState() TraceState { - return sc.traceState -} - -// WithTraceState returns a new SpanContext with the TraceState replaced. -func (sc SpanContext) WithTraceState(state TraceState) SpanContext { - return SpanContext{ - traceID: sc.traceID, - spanID: sc.spanID, - traceFlags: sc.traceFlags, - traceState: state, - remote: sc.remote, - } -} - -// Equal is a predicate that determines whether two SpanContext values are equal. -func (sc SpanContext) Equal(other SpanContext) bool { - return sc.traceID == other.traceID && - sc.spanID == other.spanID && - sc.traceFlags == other.traceFlags && - sc.traceState.String() == other.traceState.String() && - sc.remote == other.remote -} - -// MarshalJSON implements a custom marshal function to encode a SpanContext. -func (sc SpanContext) MarshalJSON() ([]byte, error) { - return json.Marshal(SpanContextConfig{ - TraceID: sc.traceID, - SpanID: sc.spanID, - TraceFlags: sc.traceFlags, - TraceState: sc.traceState, - Remote: sc.remote, - }) -} - -// Span is the individual component of a trace. It represents a single named -// and timed operation of a workflow that is traced. A Tracer is used to -// create a Span and it is then up to the operation the Span represents to -// properly end the Span when the operation itself ends. -// -// Warning: methods may be added to this interface in minor releases. -type Span interface { - // End completes the Span. The Span is considered complete and ready to be - // delivered through the rest of the telemetry pipeline after this method - // is called. Therefore, updates to the Span are not allowed after this - // method has been called. - End(options ...SpanEndOption) - - // AddEvent adds an event with the provided name and options. - AddEvent(name string, options ...EventOption) - - // IsRecording returns the recording state of the Span. 
It will return - // true if the Span is active and events can be recorded. - IsRecording() bool - - // RecordError will record err as an exception span event for this span. An - // additional call to SetStatus is required if the Status of the Span should - // be set to Error, as this method does not change the Span status. If this - // span is not being recorded or err is nil then this method does nothing. - RecordError(err error, options ...EventOption) - - // SpanContext returns the SpanContext of the Span. The returned SpanContext - // is usable even after the End method has been called for the Span. - SpanContext() SpanContext - - // SetStatus sets the status of the Span in the form of a code and a - // description, provided the status hasn't already been set to a higher - // value before (OK > Error > Unset). The description is only included in a - // status when the code is for an error. - SetStatus(code codes.Code, description string) - - // SetName sets the Span name. - SetName(name string) - - // SetAttributes sets kv as attributes of the Span. If a key from kv - // already exists for an attribute of the Span it will be overwritten with - // the value contained in kv. - SetAttributes(kv ...attribute.KeyValue) - - // TracerProvider returns a TracerProvider that can be used to generate - // additional Spans on the same telemetry pipeline as the current Span. - TracerProvider() TracerProvider -} - -// Link is the relationship between two Spans. The relationship can be within -// the same Trace or across different Traces. -// -// For example, a Link is used in the following situations: -// -// 1. Batch Processing: A batch of operations may contain operations -// associated with one or more traces/spans. Since there can only be one -// parent SpanContext, a Link is used to keep reference to the -// SpanContext of all operations in the batch. -// 2. Public Endpoint: A SpanContext for an in incoming client request on a -// public endpoint should be considered untrusted. In such a case, a new -// trace with its own identity and sampling decision needs to be created, -// but this new trace needs to be related to the original trace in some -// form. A Link is used to keep reference to the original SpanContext and -// track the relationship. -type Link struct { - // SpanContext of the linked Span. - SpanContext SpanContext - - // Attributes describe the aspects of the link. - Attributes []attribute.KeyValue -} - -// LinkFromContext returns a link encapsulating the SpanContext in the provided ctx. -func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link { - return Link{ - SpanContext: SpanContextFromContext(ctx), - Attributes: attrs, - } -} - -// SpanKind is the role a Span plays in a Trace. -type SpanKind int - -// As a convenience, these match the proto definition, see -// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129 -// -// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()` -// to coerce a span kind to a valid value. -const ( - // SpanKindUnspecified is an unspecified SpanKind and is not a valid - // SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal - // if it is received. - SpanKindUnspecified SpanKind = 0 - // SpanKindInternal is a SpanKind for a Span that represents an internal - // operation within an application. 
- SpanKindInternal SpanKind = 1 - // SpanKindServer is a SpanKind for a Span that represents the operation - // of handling a request from a client. - SpanKindServer SpanKind = 2 - // SpanKindClient is a SpanKind for a Span that represents the operation - // of client making a request to a server. - SpanKindClient SpanKind = 3 - // SpanKindProducer is a SpanKind for a Span that represents the operation - // of a producer sending a message to a message broker. Unlike - // SpanKindClient and SpanKindServer, there is often no direct - // relationship between this kind of Span and a SpanKindConsumer kind. A - // SpanKindProducer Span will end once the message is accepted by the - // message broker which might not overlap with the processing of that - // message. - SpanKindProducer SpanKind = 4 - // SpanKindConsumer is a SpanKind for a Span that represents the operation - // of a consumer receiving a message from a message broker. Like - // SpanKindProducer Spans, there is often no direct relationship between - // this Span and the Span that produced the message. - SpanKindConsumer SpanKind = 5 -) - -// ValidateSpanKind returns a valid span kind value. This will coerce -// invalid values into the default value, SpanKindInternal. -func ValidateSpanKind(spanKind SpanKind) SpanKind { - switch spanKind { - case SpanKindInternal, - SpanKindServer, - SpanKindClient, - SpanKindProducer, - SpanKindConsumer: - // valid - return spanKind - default: - return SpanKindInternal - } -} - -// String returns the specified name of the SpanKind in lower-case. -func (sk SpanKind) String() string { - switch sk { - case SpanKindInternal: - return "internal" - case SpanKindServer: - return "server" - case SpanKindClient: - return "client" - case SpanKindProducer: - return "producer" - case SpanKindConsumer: - return "consumer" - default: - return "unspecified" - } -} - -// Tracer is the creator of Spans. -// -// Warning: methods may be added to this interface in minor releases. -type Tracer interface { - // Start creates a span and a context.Context containing the newly-created span. - // - // If the context.Context provided in `ctx` contains a Span then the newly-created - // Span will be a child of that span, otherwise it will be a root span. This behavior - // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the - // newly-created Span to be a root span even if `ctx` contains a Span. - // - // When creating a Span it is recommended to provide all known span attributes using - // the `WithAttributes()` SpanOption as samplers will only have access to the - // attributes provided when a Span is created. - // - // Any Span that is created MUST also be ended. This is the responsibility of the user. - // Implementations of this API may leak memory or other resources if Spans are not ended. - Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span) -} - -// TracerProvider provides Tracers that are used by instrumentation code to -// trace computational workflows. -// -// A TracerProvider is the collection destination of all Spans from Tracers it -// provides, it represents a unique telemetry collection pipeline. How that -// pipeline is defined, meaning how those Spans are collected, processed, and -// where they are exported, depends on its implementation. Instrumentation -// authors do not need to define this implementation, rather just use the -// provided Tracers to instrument code. 
-// -// Commonly, instrumentation code will accept a TracerProvider implementation -// at runtime from its users or it can simply use the globally registered one -// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). -// -// Warning: methods may be added to this interface in minor releases. -type TracerProvider interface { - // Tracer returns a unique Tracer scoped to be used by instrumentation code - // to trace computational workflows. The scope and identity of that - // instrumentation code is uniquely defined by the name and options passed. - // - // The passed name needs to uniquely identify instrumentation code. - // Therefore, it is recommended that name is the Go package name of the - // library providing instrumentation (note: not the code being - // instrumented). Instrumentation libraries can have multiple versions, - // therefore, the WithInstrumentationVersion option should be used to - // distinguish these different codebases. Additionally, instrumentation - // libraries may sometimes use traces to communicate different domains of - // workflow data (i.e. using spans to communicate workflow events only). If - // this is the case, the WithScopeAttributes option should be used to - // uniquely identify Tracers that handle the different domains of workflow - // data. - // - // If the same name and options are passed multiple times, the same Tracer - // will be returned (it is up to the implementation if this will be the - // same underlying instance of that Tracer or not). It is not necessary to - // call this multiple times with the same name and options to get an - // up-to-date Tracer. All implementations will ensure any TracerProvider - // configuration changes are propagated to all provided Tracers. - // - // If name is empty, then an implementation defined default name will be - // used instead. - // - // This method is safe to call concurrently. - Tracer(name string, options ...TracerOption) Tracer -} diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go deleted file mode 100644 index ca68a82e5..000000000 --- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
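The trace.go file removed above defines the W3C trace-context identifiers (TraceID, SpanID, TraceFlags) and the immutable SpanContext assembled from them. A hypothetical sketch, assuming the same API surface, of turning W3C traceparent header values into a remote, sampled SpanContext:

    package main

    import (
        "fmt"

        "go.opentelemetry.io/otel/trace"
    )

    func main() {
        // Hex values taken from the W3C traceparent examples; the parsers reject
        // wrong lengths, uppercase hex, and all-zero identifiers.
        tid, err := trace.TraceIDFromHex("4bf92f3577b34da6a3ce929d0e0e4736")
        if err != nil {
            panic(err)
        }
        sid, err := trace.SpanIDFromHex("00f067aa0ba902b7")
        if err != nil {
            panic(err)
        }

        sc := trace.NewSpanContext(trace.SpanContextConfig{
            TraceID:    tid,
            SpanID:     sid,
            TraceFlags: trace.FlagsSampled,
            Remote:     true,
        })
        fmt.Println(sc.IsValid(), sc.IsSampled(), sc.IsRemote()) // true true true
    }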
- -package trace // import "go.opentelemetry.io/otel/trace" - -import ( - "encoding/json" - "fmt" - "regexp" - "strings" -) - -const ( - maxListMembers = 32 - - listDelimiter = "," - - // based on the W3C Trace Context specification, see - // https://www.w3.org/TR/trace-context-1/#tracestate-header - noTenantKeyFormat = `[a-z][_0-9a-z\-\*\/]{0,255}` - withTenantKeyFormat = `[a-z0-9][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}` - valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]` - - errInvalidKey errorConst = "invalid tracestate key" - errInvalidValue errorConst = "invalid tracestate value" - errInvalidMember errorConst = "invalid tracestate list-member" - errMemberNumber errorConst = "too many list-members in tracestate" - errDuplicate errorConst = "duplicate list-member in tracestate" -) - -var ( - keyRe = regexp.MustCompile(`^((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))$`) - valueRe = regexp.MustCompile(`^(` + valueFormat + `)$`) - memberRe = regexp.MustCompile(`^\s*((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))=(` + valueFormat + `)\s*$`) -) - -type member struct { - Key string - Value string -} - -func newMember(key, value string) (member, error) { - if !keyRe.MatchString(key) { - return member{}, fmt.Errorf("%w: %s", errInvalidKey, key) - } - if !valueRe.MatchString(value) { - return member{}, fmt.Errorf("%w: %s", errInvalidValue, value) - } - return member{Key: key, Value: value}, nil -} - -func parseMember(m string) (member, error) { - matches := memberRe.FindStringSubmatch(m) - if len(matches) != 5 { - return member{}, fmt.Errorf("%w: %s", errInvalidMember, m) - } - - return member{ - Key: matches[1], - Value: matches[4], - }, nil -} - -// String encodes member into a string compliant with the W3C Trace Context -// specification. -func (m member) String() string { - return fmt.Sprintf("%s=%s", m.Key, m.Value) -} - -// TraceState provides additional vendor-specific trace identification -// information across different distributed tracing systems. It represents an -// immutable list consisting of key/value pairs, each pair is referred to as a -// list-member. -// -// TraceState conforms to the W3C Trace Context specification -// (https://www.w3.org/TR/trace-context-1). All operations that create or copy -// a TraceState do so by validating all input and will only produce TraceState -// that conform to the specification. Specifically, this means that all -// list-member's key/value pairs are valid, no duplicate list-members exist, -// and the maximum number of list-members (32) is not exceeded. -type TraceState struct { //nolint:revive // revive complains about stutter of `trace.TraceState` - // list is the members in order. - list []member -} - -var _ json.Marshaler = TraceState{} - -// ParseTraceState attempts to decode a TraceState from the passed -// string. It returns an error if the input is invalid according to the W3C -// Trace Context specification. 
-func ParseTraceState(tracestate string) (TraceState, error) { - if tracestate == "" { - return TraceState{}, nil - } - - wrapErr := func(err error) error { - return fmt.Errorf("failed to parse tracestate: %w", err) - } - - var members []member - found := make(map[string]struct{}) - for _, memberStr := range strings.Split(tracestate, listDelimiter) { - if len(memberStr) == 0 { - continue - } - - m, err := parseMember(memberStr) - if err != nil { - return TraceState{}, wrapErr(err) - } - - if _, ok := found[m.Key]; ok { - return TraceState{}, wrapErr(errDuplicate) - } - found[m.Key] = struct{}{} - - members = append(members, m) - if n := len(members); n > maxListMembers { - return TraceState{}, wrapErr(errMemberNumber) - } - } - - return TraceState{list: members}, nil -} - -// MarshalJSON marshals the TraceState into JSON. -func (ts TraceState) MarshalJSON() ([]byte, error) { - return json.Marshal(ts.String()) -} - -// String encodes the TraceState into a string compliant with the W3C -// Trace Context specification. The returned string will be invalid if the -// TraceState contains any invalid members. -func (ts TraceState) String() string { - members := make([]string, len(ts.list)) - for i, m := range ts.list { - members[i] = m.String() - } - return strings.Join(members, listDelimiter) -} - -// Get returns the value paired with key from the corresponding TraceState -// list-member if it exists, otherwise an empty string is returned. -func (ts TraceState) Get(key string) string { - for _, member := range ts.list { - if member.Key == key { - return member.Value - } - } - - return "" -} - -// Insert adds a new list-member defined by the key/value pair to the -// TraceState. If a list-member already exists for the given key, that -// list-member's value is updated. The new or updated list-member is always -// moved to the beginning of the TraceState as specified by the W3C Trace -// Context specification. -// -// If key or value are invalid according to the W3C Trace Context -// specification an error is returned with the original TraceState. -// -// If adding a new list-member means the TraceState would have more members -// then is allowed, the new list-member will be inserted and the right-most -// list-member will be dropped in the returned TraceState. -func (ts TraceState) Insert(key, value string) (TraceState, error) { - m, err := newMember(key, value) - if err != nil { - return ts, err - } - - cTS := ts.Delete(key) - if cTS.Len()+1 <= maxListMembers { - cTS.list = append(cTS.list, member{}) - } - // When the number of members exceeds capacity, drop the "right-most". - copy(cTS.list[1:], cTS.list) - cTS.list[0] = m - - return cTS, nil -} - -// Delete returns a copy of the TraceState with the list-member identified by -// key removed. -func (ts TraceState) Delete(key string) TraceState { - members := make([]member, ts.Len()) - copy(members, ts.list) - for i, member := range ts.list { - if member.Key == key { - members = append(members[:i], members[i+1:]...) - // TraceState should contain no duplicate members. - break - } - } - return TraceState{list: members} -} - -// Len returns the number of list-members in the TraceState. 
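Before the final Len accessor below, it helps to see how the removed TraceState operations compose. A hypothetical sketch, under the same import-path assumption, of parsing a tracestate header, reading an entry, and promoting an updated entry to the front as the W3C specification requires:

    package main

    import (
        "fmt"

        "go.opentelemetry.io/otel/trace"
    )

    func main() {
        // ParseTraceState rejects invalid keys or values, duplicate keys, and
        // more than 32 list-members.
        ts, err := trace.ParseTraceState("congo=t61rcWkgMzE,rojo=00f067aa0ba902b7")
        if err != nil {
            panic(err)
        }
        fmt.Println(ts.Get("rojo")) // 00f067aa0ba902b7

        // Insert replaces the existing value and moves the list-member to the front.
        ts, err = ts.Insert("rojo", "updated")
        if err != nil {
            panic(err)
        }
        fmt.Println(ts.String()) // rojo=updated,congo=t61rcWkgMzE
    }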
-func (ts TraceState) Len() int { - return len(ts.list) -} diff --git a/vendor/go.opentelemetry.io/otel/verify_examples.sh b/vendor/go.opentelemetry.io/otel/verify_examples.sh deleted file mode 100644 index dbb61a422..000000000 --- a/vendor/go.opentelemetry.io/otel/verify_examples.sh +++ /dev/null @@ -1,85 +0,0 @@ -#!/bin/bash - -# Copyright The OpenTelemetry Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -euo pipefail - -cd $(dirname $0) -TOOLS_DIR=$(pwd)/.tools - -if [ -z "${GOPATH}" ] ; then - printf "GOPATH is not defined.\n" - exit -1 -fi - -if [ ! -d "${GOPATH}" ] ; then - printf "GOPATH ${GOPATH} is invalid \n" - exit -1 -fi - -# Pre-requisites -if ! git diff --quiet; then \ - git status - printf "\n\nError: working tree is not clean\n" - exit -1 -fi - -if [ "$(git tag --contains $(git log -1 --pretty=format:"%H"))" = "" ] ; then - printf "$(git log -1)" - printf "\n\nError: HEAD is not pointing to a tagged version" -fi - -make ${TOOLS_DIR}/gojq - -DIR_TMP="${GOPATH}/src/oteltmp/" -rm -rf $DIR_TMP -mkdir -p $DIR_TMP - -printf "Copy examples to ${DIR_TMP}\n" -cp -a ./example ${DIR_TMP} - -# Update go.mod files -printf "Update go.mod: rename module and remove replace\n" - -PACKAGE_DIRS=$(find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | egrep 'example' | sed 's/^\.\///' | sort) - -for dir in $PACKAGE_DIRS; do - printf " Update go.mod for $dir\n" - (cd "${DIR_TMP}/${dir}" && \ - # replaces is ("mod1" "mod2" …) - replaces=($(go mod edit -json | ${TOOLS_DIR}/gojq '.Replace[].Old.Path')) && \ - # strip double quotes - replaces=("${replaces[@]%\"}") && \ - replaces=("${replaces[@]#\"}") && \ - # make an array (-dropreplace=mod1 -dropreplace=mod2 …) - dropreplaces=("${replaces[@]/#/-dropreplace=}") && \ - go mod edit -module "oteltmp/${dir}" "${dropreplaces[@]}" && \ - go mod tidy) -done -printf "Update done:\n\n" - -# Build directories that contain main package. These directories are different than -# directories that contain go.mod files. -printf "Build examples:\n" -EXAMPLES=$(./get_main_pkgs.sh ./example) -for ex in $EXAMPLES; do - printf " Build $ex in ${DIR_TMP}/${ex}\n" - (cd "${DIR_TMP}/${ex}" && \ - go build .) -done - -# Cleanup -printf "Remove copied files.\n" -rm -rf $DIR_TMP diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go deleted file mode 100644 index 0e8e5e023..000000000 --- a/vendor/go.opentelemetry.io/otel/version.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package otel // import "go.opentelemetry.io/otel" - -// Version is the current release version of OpenTelemetry in use. -func Version() string { - return "1.14.0" -} diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml deleted file mode 100644 index 40df1fae4..000000000 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright The OpenTelemetry Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -module-sets: - stable-v1: - version: v1.14.0 - modules: - - go.opentelemetry.io/otel - - go.opentelemetry.io/otel/bridge/opentracing - - go.opentelemetry.io/otel/bridge/opentracing/test - - go.opentelemetry.io/otel/example/fib - - go.opentelemetry.io/otel/example/jaeger - - go.opentelemetry.io/otel/example/namedtracer - - go.opentelemetry.io/otel/example/otel-collector - - go.opentelemetry.io/otel/example/passthrough - - go.opentelemetry.io/otel/example/zipkin - - go.opentelemetry.io/otel/exporters/jaeger - - go.opentelemetry.io/otel/exporters/zipkin - - go.opentelemetry.io/otel/exporters/otlp/otlptrace - - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc - - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp - - go.opentelemetry.io/otel/exporters/otlp/internal/retry - - go.opentelemetry.io/otel/exporters/stdout/stdouttrace - - go.opentelemetry.io/otel/trace - - go.opentelemetry.io/otel/sdk - experimental-metrics: - version: v0.37.0 - modules: - - go.opentelemetry.io/otel/example/opencensus - - go.opentelemetry.io/otel/example/prometheus - - go.opentelemetry.io/otel/exporters/otlp/otlpmetric - - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc - - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp - - go.opentelemetry.io/otel/exporters/prometheus - - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric - - go.opentelemetry.io/otel/metric - - go.opentelemetry.io/otel/sdk/metric - - go.opentelemetry.io/otel/bridge/opencensus - - go.opentelemetry.io/otel/bridge/opencensus/test - - go.opentelemetry.io/otel/example/view - experimental-schema: - version: v0.0.4 - modules: - - go.opentelemetry.io/otel/schema -excluded-modules: - - go.opentelemetry.io/otel/internal/tools diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go index cff0cd49e..2540bd682 100644 --- a/vendor/golang.org/x/exp/slices/slices.go +++ b/vendor/golang.org/x/exp/slices/slices.go @@ -104,8 +104,8 @@ func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int { // Index returns the index of the first occurrence of v in s, // or -1 if not present. func Index[E comparable](s []E, v E) int { - for i, vs := range s { - if v == vs { + for i := range s { + if v == s[i] { return i } } @@ -115,8 +115,8 @@ func Index[E comparable](s []E, v E) int { // IndexFunc returns the first index i satisfying f(s[i]), // or -1 if none do. 
func IndexFunc[E any](s []E, f func(E) bool) int { - for i, v := range s { - if f(v) { + for i := range s { + if f(s[i]) { return i } } @@ -207,12 +207,12 @@ func Compact[S ~[]E, E comparable](s S) S { return s } i := 1 - last := s[0] - for _, v := range s[1:] { - if v != last { - s[i] = v + for k := 1; k < len(s); k++ { + if s[k] != s[k-1] { + if i != k { + s[i] = s[k] + } i++ - last = v } } return s[:i] @@ -224,12 +224,12 @@ func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { return s } i := 1 - last := s[0] - for _, v := range s[1:] { - if !eq(v, last) { - s[i] = v + for k := 1; k < len(s); k++ { + if !eq(s[k], s[k-1]) { + if i != k { + s[i] = s[k] + } i++ - last = v } } return s[:i] diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go index f14f40da7..231b6448a 100644 --- a/vendor/golang.org/x/exp/slices/sort.go +++ b/vendor/golang.org/x/exp/slices/sort.go @@ -81,10 +81,12 @@ func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) { } // BinarySearchFunc works like BinarySearch, but uses a custom comparison -// function. The slice must be sorted in increasing order, where "increasing" is -// defined by cmp. cmp(a, b) is expected to return an integer comparing the two -// parameters: 0 if a == b, a negative number if a < b and a positive number if -// a > b. +// function. The slice must be sorted in increasing order, where "increasing" +// is defined by cmp. cmp should return 0 if the slice element matches +// the target, a negative number if the slice element precedes the target, +// or a positive number if the slice element follows the target. +// cmp must implement the same ordering as the slice, such that if +// cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice. func BinarySearchFunc[E, T any](x []E, target T, cmp func(E, T) int) (int, bool) { n := len(x) // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 . diff --git a/vendor/google.golang.org/genproto/LICENSE b/vendor/google.golang.org/genproto/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/google.golang.org/genproto/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/go-openapi/runtime/LICENSE b/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE similarity index 100% rename from vendor/github.com/go-openapi/runtime/LICENSE rename to vendor/google.golang.org/genproto/googleapis/rpc/LICENSE diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go new file mode 100644 index 000000000..e8789cb33 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go @@ -0,0 +1,588 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. 
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/field_mask.proto + +// Package fieldmaskpb contains generated types for google/protobuf/field_mask.proto. +// +// The FieldMask message represents a set of symbolic field paths. +// The paths are specific to some target message type, +// which is not stored within the FieldMask message itself. +// +// # Constructing a FieldMask +// +// The New function is used construct a FieldMask: +// +// var messageType *descriptorpb.DescriptorProto +// fm, err := fieldmaskpb.New(messageType, "field.name", "field.number") +// if err != nil { +// ... // handle error +// } +// ... // make use of fm +// +// The "field.name" and "field.number" paths are valid paths according to the +// google.protobuf.DescriptorProto message. Use of a path that does not correlate +// to valid fields reachable from DescriptorProto would result in an error. +// +// Once a FieldMask message has been constructed, +// the Append method can be used to insert additional paths to the path set: +// +// var messageType *descriptorpb.DescriptorProto +// if err := fm.Append(messageType, "options"); err != nil { +// ... // handle error +// } +// +// # Type checking a FieldMask +// +// In order to verify that a FieldMask represents a set of fields that are +// reachable from some target message type, use the IsValid method: +// +// var messageType *descriptorpb.DescriptorProto +// if fm.IsValid(messageType) { +// ... // make use of fm +// } +// +// IsValid needs to be passed the target message type as an input since the +// FieldMask message itself does not store the message type that the set of paths +// are for. 
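The package documentation above covers constructing and type-checking a FieldMask; the following hypothetical sketch also exercises the set operations defined later in this file, using google.protobuf.Duration only because it is a small well-known message with seconds and nanos fields:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/types/known/durationpb"
        "google.golang.org/protobuf/types/known/fieldmaskpb"
    )

    func main() {
        d := &durationpb.Duration{}

        // New and Append validate every path against the message descriptor.
        fm, err := fieldmaskpb.New(d, "seconds")
        if err != nil {
            panic(err)
        }
        if err := fm.Append(d, "nanos"); err != nil {
            panic(err)
        }
        fmt.Println(fm.IsValid(d), fm.GetPaths()) // true [seconds nanos]

        // Union and Intersect return normalized masks (sorted, with redundant
        // prefixed paths removed).
        other := &fieldmaskpb.FieldMask{Paths: []string{"seconds"}}
        fmt.Println(fieldmaskpb.Union(fm, other).GetPaths())     // [nanos seconds]
        fmt.Println(fieldmaskpb.Intersect(fm, other).GetPaths()) // [seconds]
    }

Normalize, shown further down in this hunk, applies the same sorting and prefix elision that Union and Intersect rely on.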
+package fieldmaskpb + +import ( + proto "google.golang.org/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sort "sort" + strings "strings" + sync "sync" +) + +// `FieldMask` represents a set of symbolic field paths, for example: +// +// paths: "f.a" +// paths: "f.b.d" +// +// Here `f` represents a field in some root message, `a` and `b` +// fields in the message found in `f`, and `d` a field found in the +// message in `f.b`. +// +// Field masks are used to specify a subset of fields that should be +// returned by a get operation or modified by an update operation. +// Field masks also have a custom JSON encoding (see below). +// +// # Field Masks in Projections +// +// When used in the context of a projection, a response message or +// sub-message is filtered by the API to only contain those fields as +// specified in the mask. For example, if the mask in the previous +// example is applied to a response message as follows: +// +// f { +// a : 22 +// b { +// d : 1 +// x : 2 +// } +// y : 13 +// } +// z: 8 +// +// The result will not contain specific values for fields x,y and z +// (their value will be set to the default, and omitted in proto text +// output): +// +// f { +// a : 22 +// b { +// d : 1 +// } +// } +// +// A repeated field is not allowed except at the last position of a +// paths string. +// +// If a FieldMask object is not present in a get operation, the +// operation applies to all fields (as if a FieldMask of all fields +// had been specified). +// +// Note that a field mask does not necessarily apply to the +// top-level response message. In case of a REST get operation, the +// field mask applies directly to the response, but in case of a REST +// list operation, the mask instead applies to each individual message +// in the returned resource list. In case of a REST custom method, +// other definitions may be used. Where the mask applies will be +// clearly documented together with its declaration in the API. In +// any case, the effect on the returned resource/resources is required +// behavior for APIs. +// +// # Field Masks in Update Operations +// +// A field mask in update operations specifies which fields of the +// targeted resource are going to be updated. The API is required +// to only change the values of the fields as specified in the mask +// and leave the others untouched. If a resource is passed in to +// describe the updated values, the API ignores the values of all +// fields not covered by the mask. +// +// If a repeated field is specified for an update operation, new values will +// be appended to the existing repeated field in the target resource. Note that +// a repeated field is only allowed in the last position of a `paths` string. +// +// If a sub-message is specified in the last position of the field mask for an +// update operation, then new value will be merged into the existing sub-message +// in the target resource. +// +// For example, given the target message: +// +// f { +// b { +// d: 1 +// x: 2 +// } +// c: [1] +// } +// +// And an update message: +// +// f { +// b { +// d: 10 +// } +// c: [2] +// } +// +// then if the field mask is: +// +// paths: ["f.b", "f.c"] +// +// then the result will be: +// +// f { +// b { +// d: 10 +// x: 2 +// } +// c: [1, 2] +// } +// +// An implementation may provide options to override this default behavior for +// repeated and message fields. 
+// +// In order to reset a field's value to the default, the field must +// be in the mask and set to the default value in the provided resource. +// Hence, in order to reset all fields of a resource, provide a default +// instance of the resource and set all fields in the mask, or do +// not provide a mask as described below. +// +// If a field mask is not present on update, the operation applies to +// all fields (as if a field mask of all fields has been specified). +// Note that in the presence of schema evolution, this may mean that +// fields the client does not know and has therefore not filled into +// the request will be reset to their default. If this is unwanted +// behavior, a specific service may require a client to always specify +// a field mask, producing an error if not. +// +// As with get operations, the location of the resource which +// describes the updated values in the request message depends on the +// operation kind. In any case, the effect of the field mask is +// required to be honored by the API. +// +// ## Considerations for HTTP REST +// +// The HTTP kind of an update operation which uses a field mask must +// be set to PATCH instead of PUT in order to satisfy HTTP semantics +// (PUT must only be used for full updates). +// +// # JSON Encoding of Field Masks +// +// In JSON, a field mask is encoded as a single string where paths are +// separated by a comma. Fields name in each path are converted +// to/from lower-camel naming conventions. +// +// As an example, consider the following message declarations: +// +// message Profile { +// User user = 1; +// Photo photo = 2; +// } +// message User { +// string display_name = 1; +// string address = 2; +// } +// +// In proto a field mask for `Profile` may look as such: +// +// mask { +// paths: "user.display_name" +// paths: "photo" +// } +// +// In JSON, the same mask is represented as below: +// +// { +// mask: "user.displayName,photo" +// } +// +// # Field Masks and Oneof Fields +// +// Field masks treat fields in oneofs just as regular fields. Consider the +// following message: +// +// message SampleMessage { +// oneof test_oneof { +// string name = 4; +// SubMessage sub_message = 9; +// } +// } +// +// The field mask can be: +// +// mask { +// paths: "name" +// } +// +// Or: +// +// mask { +// paths: "sub_message" +// } +// +// Note that oneof type names ("test_oneof" in this case) cannot be used in +// paths. +// +// ## Field Mask Verification +// +// The implementation of any API method which has a FieldMask type field in the +// request should verify the included field paths, and return an +// `INVALID_ARGUMENT` error if any path is unmappable. +type FieldMask struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The set of field mask paths. + Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"` +} + +// New constructs a field mask from a list of paths and verifies that +// each one is valid according to the specified message type. +func New(m proto.Message, paths ...string) (*FieldMask, error) { + x := new(FieldMask) + return x, x.Append(m, paths...) +} + +// Union returns the union of all the paths in the input field masks. +func Union(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask { + var out []string + out = append(out, mx.GetPaths()...) + out = append(out, my.GetPaths()...) + for _, m := range ms { + out = append(out, m.GetPaths()...) 
+ } + return &FieldMask{Paths: normalizePaths(out)} +} + +// Intersect returns the intersection of all the paths in the input field masks. +func Intersect(mx *FieldMask, my *FieldMask, ms ...*FieldMask) *FieldMask { + var ss1, ss2 []string // reused buffers for performance + intersect := func(out, in []string) []string { + ss1 = normalizePaths(append(ss1[:0], in...)) + ss2 = normalizePaths(append(ss2[:0], out...)) + out = out[:0] + for i1, i2 := 0, 0; i1 < len(ss1) && i2 < len(ss2); { + switch s1, s2 := ss1[i1], ss2[i2]; { + case hasPathPrefix(s1, s2): + out = append(out, s1) + i1++ + case hasPathPrefix(s2, s1): + out = append(out, s2) + i2++ + case lessPath(s1, s2): + i1++ + case lessPath(s2, s1): + i2++ + } + } + return out + } + + out := Union(mx, my, ms...).GetPaths() + out = intersect(out, mx.GetPaths()) + out = intersect(out, my.GetPaths()) + for _, m := range ms { + out = intersect(out, m.GetPaths()) + } + return &FieldMask{Paths: normalizePaths(out)} +} + +// IsValid reports whether all the paths are syntactically valid and +// refer to known fields in the specified message type. +// It reports false for a nil FieldMask. +func (x *FieldMask) IsValid(m proto.Message) bool { + paths := x.GetPaths() + return x != nil && numValidPaths(m, paths) == len(paths) +} + +// Append appends a list of paths to the mask and verifies that each one +// is valid according to the specified message type. +// An invalid path is not appended and breaks insertion of subsequent paths. +func (x *FieldMask) Append(m proto.Message, paths ...string) error { + numValid := numValidPaths(m, paths) + x.Paths = append(x.Paths, paths[:numValid]...) + paths = paths[numValid:] + if len(paths) > 0 { + name := m.ProtoReflect().Descriptor().FullName() + return protoimpl.X.NewError("invalid path %q for message %q", paths[0], name) + } + return nil +} + +func numValidPaths(m proto.Message, paths []string) int { + md0 := m.ProtoReflect().Descriptor() + for i, path := range paths { + md := md0 + if !rangeFields(path, func(field string) bool { + // Search the field within the message. + if md == nil { + return false // not within a message + } + fd := md.Fields().ByName(protoreflect.Name(field)) + // The real field name of a group is the message name. + if fd == nil { + gd := md.Fields().ByName(protoreflect.Name(strings.ToLower(field))) + if gd != nil && gd.Kind() == protoreflect.GroupKind && string(gd.Message().Name()) == field { + fd = gd + } + } else if fd.Kind() == protoreflect.GroupKind && string(fd.Message().Name()) != field { + fd = nil + } + if fd == nil { + return false // message has does not have this field + } + + // Identify the next message to search within. + md = fd.Message() // may be nil + + // Repeated fields are only allowed at the last position. + if fd.IsList() || fd.IsMap() { + md = nil + } + + return true + }) { + return i + } + } + return len(paths) +} + +// Normalize converts the mask to its canonical form where all paths are sorted +// and redundant paths are removed. +func (x *FieldMask) Normalize() { + x.Paths = normalizePaths(x.Paths) +} + +func normalizePaths(paths []string) []string { + sort.Slice(paths, func(i, j int) bool { + return lessPath(paths[i], paths[j]) + }) + + // Elide any path that is a prefix match on the previous. 
+ out := paths[:0] + for _, path := range paths { + if len(out) > 0 && hasPathPrefix(path, out[len(out)-1]) { + continue + } + out = append(out, path) + } + return out +} + +// hasPathPrefix is like strings.HasPrefix, but further checks for either +// an exact matche or that the prefix is delimited by a dot. +func hasPathPrefix(path, prefix string) bool { + return strings.HasPrefix(path, prefix) && (len(path) == len(prefix) || path[len(prefix)] == '.') +} + +// lessPath is a lexicographical comparison where dot is specially treated +// as the smallest symbol. +func lessPath(x, y string) bool { + for i := 0; i < len(x) && i < len(y); i++ { + if x[i] != y[i] { + return (x[i] - '.') < (y[i] - '.') + } + } + return len(x) < len(y) +} + +// rangeFields is like strings.Split(path, "."), but avoids allocations by +// iterating over each field in place and calling a iterator function. +func rangeFields(path string, f func(field string) bool) bool { + for { + var field string + if i := strings.IndexByte(path, '.'); i >= 0 { + field, path = path[:i], path[i:] + } else { + field, path = path, "" + } + + if !f(field) { + return false + } + + if len(path) == 0 { + return true + } + path = strings.TrimPrefix(path, ".") + } +} + +func (x *FieldMask) Reset() { + *x = FieldMask{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_field_mask_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldMask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldMask) ProtoMessage() {} + +func (x *FieldMask) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_field_mask_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldMask.ProtoReflect.Descriptor instead. 
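// NOTE (editorial aside, not part of the vendored file): a minimal usage
// sketch for the fieldmaskpb API above. The message type pb.Profile and its
// fields are assumed for illustration only; New and Append verify paths
// against the message descriptor, while Union and Normalize work on the
// symbolic paths alone.
//
//	mask, err := fieldmaskpb.New(&pb.Profile{}, "user.display_name")
//	if err != nil {
//		// the path could not be resolved against pb.Profile's descriptor
//	}
//	_ = mask.Append(&pb.Profile{}, "photo")
//	union := fieldmaskpb.Union(mask, &fieldmaskpb.FieldMask{Paths: []string{"user"}})
//	union.Normalize()
//	fmt.Println(union.GetPaths()) // [photo user] ("user" subsumes "user.display_name")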
+func (*FieldMask) Descriptor() ([]byte, []int) { + return file_google_protobuf_field_mask_proto_rawDescGZIP(), []int{0} +} + +func (x *FieldMask) GetPaths() []string { + if x != nil { + return x.Paths + } + return nil +} + +var File_google_protobuf_field_mask_proto protoreflect.FileDescriptor + +var file_google_protobuf_field_mask_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x22, 0x21, 0x0a, 0x09, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, + 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x42, 0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x6d, 0x61, + 0x73, 0x6b, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_field_mask_proto_rawDescOnce sync.Once + file_google_protobuf_field_mask_proto_rawDescData = file_google_protobuf_field_mask_proto_rawDesc +) + +func file_google_protobuf_field_mask_proto_rawDescGZIP() []byte { + file_google_protobuf_field_mask_proto_rawDescOnce.Do(func() { + file_google_protobuf_field_mask_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_field_mask_proto_rawDescData) + }) + return file_google_protobuf_field_mask_proto_rawDescData +} + +var file_google_protobuf_field_mask_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_field_mask_proto_goTypes = []interface{}{ + (*FieldMask)(nil), // 0: google.protobuf.FieldMask +} +var file_google_protobuf_field_mask_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_protobuf_field_mask_proto_init() } +func file_google_protobuf_field_mask_proto_init() { + if File_google_protobuf_field_mask_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_field_mask_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FieldMask); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_field_mask_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + 
NumServices: 0, + }, + GoTypes: file_google_protobuf_field_mask_proto_goTypes, + DependencyIndexes: file_google_protobuf_field_mask_proto_depIdxs, + MessageInfos: file_google_protobuf_field_mask_proto_msgTypes, + }.Build() + File_google_protobuf_field_mask_proto = out.File + file_google_protobuf_field_mask_proto_rawDesc = nil + file_google_protobuf_field_mask_proto_goTypes = nil + file_google_protobuf_field_mask_proto_depIdxs = nil +} diff --git a/vendor/k8s.io/klog/v2/contextual.go b/vendor/k8s.io/klog/v2/contextual.go index 2428963c0..005513f2a 100644 --- a/vendor/k8s.io/klog/v2/contextual.go +++ b/vendor/k8s.io/klog/v2/contextual.go @@ -70,11 +70,14 @@ func SetLogger(logger logr.Logger) { // routing log entries through klogr into klog and then into the actual Logger // backend. func SetLoggerWithOptions(logger logr.Logger, opts ...LoggerOption) { - logging.logger = &logger logging.loggerOptions = loggerOptions{} for _, opt := range opts { opt(&logging.loggerOptions) } + logging.logger = &logWriter{ + Logger: logger, + writeKlogBuffer: logging.loggerOptions.writeKlogBuffer, + } } // ContextualLogger determines whether the logger passed to @@ -93,6 +96,22 @@ func FlushLogger(flush func()) LoggerOption { } } +// WriteKlogBuffer sets a callback that will be invoked by klog to write output +// produced by non-structured log calls like Infof. +// +// The buffer will contain exactly the same data that klog normally would write +// into its own output stream(s). In particular this includes the header, if +// klog is configured to write one. The callback then can divert that data into +// its own output streams. The buffer may or may not end in a line break. +// +// Without such a callback, klog will call the logger's Info or Error method +// with just the message string (i.e. no header). +func WriteKlogBuffer(write func([]byte)) LoggerOption { + return func(o *loggerOptions) { + o.writeKlogBuffer = write + } +} + // LoggerOption implements the functional parameter paradigm for // SetLoggerWithOptions. type LoggerOption func(o *loggerOptions) @@ -100,6 +119,13 @@ type LoggerOption func(o *loggerOptions) type loggerOptions struct { contextualLogger bool flush func() + writeKlogBuffer func([]byte) +} + +// logWriter combines a logger (always set) with a write callback (optional). +type logWriter struct { + Logger + writeKlogBuffer func([]byte) } // ClearLogger removes a backing Logger implementation if one was set earlier @@ -152,7 +178,7 @@ func Background() Logger { if logging.loggerOptions.contextualLogger { // Is non-nil because logging.loggerOptions.contextualLogger is // only true if a logger was set. - return *logging.logger + return logging.logger.Logger } return klogLogger diff --git a/vendor/k8s.io/klog/v2/format.go b/vendor/k8s.io/klog/v2/format.go new file mode 100644 index 000000000..63995ca6d --- /dev/null +++ b/vendor/k8s.io/klog/v2/format.go @@ -0,0 +1,65 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package klog + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/go-logr/logr" +) + +// Format wraps a value of an arbitrary type and implement fmt.Stringer and +// logr.Marshaler for them. Stringer returns pretty-printed JSON. MarshalLog +// returns the original value with a type that has no special methods, in +// particular no MarshalLog or MarshalJSON. +// +// Wrapping values like that is useful when the value has a broken +// implementation of these special functions (for example, a type which +// inherits String from TypeMeta, but then doesn't re-implement String) or the +// implementation produces output that is less readable or unstructured (for +// example, the generated String functions for Kubernetes API types). +func Format(obj interface{}) interface{} { + return formatAny{Object: obj} +} + +type formatAny struct { + Object interface{} +} + +func (f formatAny) String() string { + var buffer strings.Builder + encoder := json.NewEncoder(&buffer) + encoder.SetIndent("", " ") + if err := encoder.Encode(&f.Object); err != nil { + return fmt.Sprintf("error marshaling %T to JSON: %v", f, err) + } + return buffer.String() +} + +func (f formatAny) MarshalLog() interface{} { + // Returning a pointer to a pointer ensures that zapr doesn't find a + // fmt.Stringer or logr.Marshaler when it checks the type of the + // value. It then falls back to reflection, which dumps the value being + // pointed to (JSON doesn't have pointers). + ptr := &f.Object + return &ptr +} + +var _ fmt.Stringer = formatAny{} +var _ logr.Marshaler = formatAny{} diff --git a/vendor/k8s.io/klog/v2/internal/buffer/buffer.go b/vendor/k8s.io/klog/v2/internal/buffer/buffer.go index ac88682a2..f325ded5e 100644 --- a/vendor/k8s.io/klog/v2/internal/buffer/buffer.go +++ b/vendor/k8s.io/klog/v2/internal/buffer/buffer.go @@ -40,44 +40,33 @@ type Buffer struct { next *Buffer } -// Buffers manages the reuse of individual buffer instances. It is thread-safe. -type Buffers struct { - // mu protects the free list. It is separate from the main mutex - // so buffers can be grabbed and printed to without holding the main lock, - // for better parallelization. - mu sync.Mutex - - // freeList is a list of byte buffers, maintained under mu. - freeList *Buffer +var buffers = sync.Pool{ + New: func() interface{} { + return new(Buffer) + }, } // GetBuffer returns a new, ready-to-use buffer. -func (bl *Buffers) GetBuffer() *Buffer { - bl.mu.Lock() - b := bl.freeList - if b != nil { - bl.freeList = b.next - } - bl.mu.Unlock() - if b == nil { - b = new(Buffer) - } else { - b.next = nil - b.Reset() - } +func GetBuffer() *Buffer { + b := buffers.Get().(*Buffer) + b.Reset() return b } // PutBuffer returns a buffer to the free list. -func (bl *Buffers) PutBuffer(b *Buffer) { +func PutBuffer(b *Buffer) { if b.Len() >= 256 { - // Let big buffers die a natural death. + // Let big buffers die a natural death, without relying on + // sync.Pool behavior. The documentation implies that items may + // get deallocated while stored there ("If the Pool holds the + // only reference when this [= be removed automatically] + // happens, the item might be deallocated."), but + // https://github.com/golang/go/issues/23199 leans more towards + // having such a size limit. return } - bl.mu.Lock() - b.next = bl.freeList - bl.freeList = b - bl.mu.Unlock() + + buffers.Put(b) } // Some custom tiny helper functions to print the log header efficiently. 
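For context on the new format.go vendored above: klog.Format defers rendering of a value until the entry is emitted, producing indented JSON for text output while handing the raw value to structured backends. A minimal usage sketch; the podStatus type is illustrative and not part of this change:

package main

import "k8s.io/klog/v2"

type podStatus struct {
	Phase   string
	Message string
}

func main() {
	status := podStatus{Phase: "Running", Message: "ok"}
	// Text output renders the wrapped value as pretty-printed JSON via
	// fmt.Stringer; structured backends receive the underlying value.
	klog.InfoS("pod status changed", "status", klog.Format(status))
	klog.Flush()
}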
@@ -121,7 +110,8 @@ func (buf *Buffer) someDigits(i, d int) int { return copy(buf.Tmp[i:], buf.Tmp[j:]) } -// FormatHeader formats a log header using the provided file name and line number. +// FormatHeader formats a log header using the provided file name and line number +// and writes it into the buffer. func (buf *Buffer) FormatHeader(s severity.Severity, file string, line int, now time.Time) { if line < 0 { line = 0 // not a real line number, but acceptable to someDigits @@ -157,3 +147,30 @@ func (buf *Buffer) FormatHeader(s severity.Severity, file string, line int, now buf.Tmp[n+2] = ' ' buf.Write(buf.Tmp[:n+3]) } + +// SprintHeader formats a log header and returns a string. This is a simpler +// version of FormatHeader for use in ktesting. +func (buf *Buffer) SprintHeader(s severity.Severity, now time.Time) string { + if s > severity.FatalLog { + s = severity.InfoLog // for safety. + } + + // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. + // It's worth about 3X. Fprintf is hard. + _, month, day := now.Date() + hour, minute, second := now.Clock() + // Lmmdd hh:mm:ss.uuuuuu threadid file:line] + buf.Tmp[0] = severity.Char[s] + buf.twoDigits(1, int(month)) + buf.twoDigits(3, day) + buf.Tmp[5] = ' ' + buf.twoDigits(6, hour) + buf.Tmp[8] = ':' + buf.twoDigits(9, minute) + buf.Tmp[11] = ':' + buf.twoDigits(12, second) + buf.Tmp[14] = '.' + buf.nDigits(6, 15, now.Nanosecond()/1000, '0') + buf.Tmp[21] = ']' + return string(buf.Tmp[:22]) +} diff --git a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go index ad6bf1116..bcdf5f8ee 100644 --- a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go +++ b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go @@ -18,12 +18,17 @@ package serialize import ( "bytes" + "encoding/json" "fmt" "strconv" "github.com/go-logr/logr" ) +type textWriter interface { + WriteText(*bytes.Buffer) +} + // WithValues implements LogSink.WithValues. The old key/value pairs are // assumed to be well-formed, the new ones are checked and padded if // necessary. It returns a new slice. @@ -91,11 +96,66 @@ func MergeKVs(first, second []interface{}) []interface{} { return merged } +type Formatter struct { + AnyToStringHook AnyToStringFunc +} + +type AnyToStringFunc func(v interface{}) string + +// MergeKVsInto is a variant of MergeKVs which directly formats the key/value +// pairs into a buffer. +func (f Formatter) MergeAndFormatKVs(b *bytes.Buffer, first, second []interface{}) { + if len(first) == 0 && len(second) == 0 { + // Nothing to do at all. + return + } + + if len(first) == 0 && len(second)%2 == 0 { + // Nothing to be overridden, second slice is well-formed + // and can be used directly. + for i := 0; i < len(second); i += 2 { + f.KVFormat(b, second[i], second[i+1]) + } + return + } + + // Determine which keys are in the second slice so that we can skip + // them when iterating over the first one. The code intentionally + // favors performance over completeness: we assume that keys are string + // constants and thus compare equal when the string values are equal. A + // string constant being overridden by, for example, a fmt.Stringer is + // not handled. + overrides := map[interface{}]bool{} + for i := 0; i < len(second); i += 2 { + overrides[second[i]] = true + } + for i := 0; i < len(first); i += 2 { + key := first[i] + if overrides[key] { + continue + } + f.KVFormat(b, key, first[i+1]) + } + // Round down. 
+ l := len(second) + l = l / 2 * 2 + for i := 1; i < l; i += 2 { + f.KVFormat(b, second[i-1], second[i]) + } + if len(second)%2 == 1 { + f.KVFormat(b, second[len(second)-1], missingValue) + } +} + +func MergeAndFormatKVs(b *bytes.Buffer, first, second []interface{}) { + Formatter{}.MergeAndFormatKVs(b, first, second) +} + const missingValue = "(MISSING)" // KVListFormat serializes all key/value pairs into the provided buffer. // A space gets inserted before the first pair and between each pair. -func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { +func (f Formatter) KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { for i := 0; i < len(keysAndValues); i += 2 { var v interface{} k := keysAndValues[i] @@ -104,67 +164,104 @@ func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { } else { v = missingValue } - b.WriteByte(' ') - // Keys are assumed to be well-formed according to - // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments - // for the sake of performance. Keys with spaces, - // special characters, etc. will break parsing. - if sK, ok := k.(string); ok { - // Avoid one allocation when the key is a string, which - // normally it should be. - b.WriteString(sK) - } else { - b.WriteString(fmt.Sprintf("%s", k)) - } + f.KVFormat(b, k, v) + } +} + +func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { + Formatter{}.KVListFormat(b, keysAndValues...) +} + +// KVFormat serializes one key/value pair into the provided buffer. +// A space gets inserted before the pair. +func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) { + b.WriteByte(' ') + // Keys are assumed to be well-formed according to + // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments + // for the sake of performance. Keys with spaces, + // special characters, etc. will break parsing. + if sK, ok := k.(string); ok { + // Avoid one allocation when the key is a string, which + // normally it should be. + b.WriteString(sK) + } else { + b.WriteString(fmt.Sprintf("%s", k)) + } - // The type checks are sorted so that more frequently used ones - // come first because that is then faster in the common - // cases. In Kubernetes, ObjectRef (a Stringer) is more common - // than plain strings - // (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235). - switch v := v.(type) { - case fmt.Stringer: - writeStringValue(b, true, StringerToString(v)) + // The type checks are sorted so that more frequently used ones + // come first because that is then faster in the common + // cases. In Kubernetes, ObjectRef (a Stringer) is more common + // than plain strings + // (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235). + switch v := v.(type) { + case textWriter: + writeTextWriterValue(b, v) + case fmt.Stringer: + writeStringValue(b, StringerToString(v)) + case string: + writeStringValue(b, v) + case error: + writeStringValue(b, ErrorToString(v)) + case logr.Marshaler: + value := MarshalerToValue(v) + // A marshaler that returns a string is useful for + // delayed formatting of complex values. We treat this + // case like a normal string. This is useful for + // multi-line support. + // + // We could do this by recursively formatting a value, + // but that comes with the risk of infinite recursion + // if a marshaler returns itself. 
Instead we call it
+		// only once and rely on it returning the intended
+		// value directly.
+		switch value := value.(type) {
 		case string:
-			writeStringValue(b, true, v)
-		case error:
-			writeStringValue(b, true, ErrorToString(v))
-		case logr.Marshaler:
-			value := MarshalerToValue(v)
-			// A marshaler that returns a string is useful for
-			// delayed formatting of complex values. We treat this
-			// case like a normal string. This is useful for
-			// multi-line support.
-			//
-			// We could do this by recursively formatting a value,
-			// but that comes with the risk of infinite recursion
-			// if a marshaler returns itself. Instead we call it
-			// only once and rely on it returning the intended
-			// value directly.
-			switch value := value.(type) {
-			case string:
-				writeStringValue(b, true, value)
-			default:
-				writeStringValue(b, false, fmt.Sprintf("%+v", value))
-			}
-		case []byte:
-			// In https://github.com/kubernetes/klog/pull/237 it was decided
-			// to format byte slices with "%+q". The advantages of that are:
-			// - readable output if the bytes happen to be printable
-			// - non-printable bytes get represented as unicode escape
-			//   sequences (\uxxxx)
-			//
-			// The downsides are that we cannot use the faster
-			// strconv.Quote here and that multi-line output is not
-			// supported. If developers know that a byte array is
-			// printable and they want multi-line output, they can
-			// convert the value to string before logging it.
-			b.WriteByte('=')
-			b.WriteString(fmt.Sprintf("%+q", v))
+			writeStringValue(b, value)
 		default:
-			writeStringValue(b, false, fmt.Sprintf("%+v", v))
+			f.formatAny(b, value)
 		}
+	case []byte:
+		// In https://github.com/kubernetes/klog/pull/237 it was decided
+		// to format byte slices with "%+q". The advantages of that are:
+		// - readable output if the bytes happen to be printable
+		// - non-printable bytes get represented as unicode escape
+		//   sequences (\uxxxx)
+		//
+		// The downsides are that we cannot use the faster
+		// strconv.Quote here and that multi-line output is not
+		// supported. If developers know that a byte array is
+		// printable and they want multi-line output, they can
+		// convert the value to string before logging it.
+		b.WriteByte('=')
+		b.WriteString(fmt.Sprintf("%+q", v))
+	default:
+		f.formatAny(b, v)
+	}
+}
+
+func KVFormat(b *bytes.Buffer, k, v interface{}) {
+	Formatter{}.KVFormat(b, k, v)
+}
+
+// formatAny is the fallback formatter for a value. It supports a hook (for
+// example, for YAML encoding) and itself uses JSON encoding.
+func (f Formatter) formatAny(b *bytes.Buffer, v interface{}) {
+	b.WriteRune('=')
+	if f.AnyToStringHook != nil {
+		b.WriteString(f.AnyToStringHook(v))
+		return
+	}
+	encoder := json.NewEncoder(b)
+	l := b.Len()
+	if err := encoder.Encode(v); err != nil {
+		// This shouldn't happen. We discard whatever the encoder
+		// wrote and instead dump an error string.
+		b.Truncate(l)
+		b.WriteString(fmt.Sprintf(`"<internal error: %v>"`, err))
+		return
 	}
+	// Remove trailing newline.
+	b.Truncate(b.Len() - 1)
 }
 
 // StringerToString converts a Stringer to a string,
@@ -203,18 +300,23 @@ func ErrorToString(err error) (ret string) {
 	return
 }
 
-func writeStringValue(b *bytes.Buffer, quote bool, v string) {
+func writeTextWriterValue(b *bytes.Buffer, v textWriter) {
+	b.WriteByte('=')
+	defer func() {
+		if err := recover(); err != nil {
+			fmt.Fprintf(b, `"<panic: %s>"`, err)
+		}
+	}()
+	v.WriteText(b)
+}
+
+func writeStringValue(b *bytes.Buffer, v string) {
 	data := []byte(v)
 	index := bytes.IndexByte(data, '\n')
 	if index == -1 {
 		b.WriteByte('=')
-		if quote {
-			// Simple string, quote quotation marks and non-printable characters.
-			b.WriteString(strconv.Quote(v))
-			return
-		}
-		// Non-string with no line breaks.
-		b.WriteString(v)
+		// Simple string, quote quotation marks and non-printable characters.
+		b.WriteString(strconv.Quote(v))
 		return
 	}
diff --git a/vendor/k8s.io/klog/v2/k8s_references.go b/vendor/k8s.io/klog/v2/k8s_references.go
index 2c218f698..786af74bf 100644
--- a/vendor/k8s.io/klog/v2/k8s_references.go
+++ b/vendor/k8s.io/klog/v2/k8s_references.go
@@ -17,8 +17,10 @@ limitations under the License.
 package klog
 
 import (
+	"bytes"
 	"fmt"
 	"reflect"
+	"strings"
 
 	"github.com/go-logr/logr"
 )
@@ -31,11 +33,30 @@ type ObjectRef struct {
 
 func (ref ObjectRef) String() string {
 	if ref.Namespace != "" {
-		return fmt.Sprintf("%s/%s", ref.Namespace, ref.Name)
+		var builder strings.Builder
+		builder.Grow(len(ref.Namespace) + len(ref.Name) + 1)
+		builder.WriteString(ref.Namespace)
+		builder.WriteRune('/')
+		builder.WriteString(ref.Name)
+		return builder.String()
 	}
 	return ref.Name
 }
 
+func (ref ObjectRef) WriteText(out *bytes.Buffer) {
+	out.WriteRune('"')
+	ref.writeUnquoted(out)
+	out.WriteRune('"')
+}
+
+func (ref ObjectRef) writeUnquoted(out *bytes.Buffer) {
+	if ref.Namespace != "" {
+		out.WriteString(ref.Namespace)
+		out.WriteRune('/')
+	}
+	out.WriteString(ref.Name)
+}
+
 // MarshalLog ensures that loggers with support for structured output will log
 // as a struct by removing the String method via a custom type.
 func (ref ObjectRef) MarshalLog() interface{} {
@@ -117,31 +138,31 @@ var _ fmt.Stringer = kobjSlice{}
 var _ logr.Marshaler = kobjSlice{}
 
 func (ks kobjSlice) String() string {
-	objectRefs, err := ks.process()
-	if err != nil {
-		return err.Error()
+	objectRefs, errStr := ks.process()
+	if errStr != "" {
+		return errStr
 	}
 	return fmt.Sprintf("%v", objectRefs)
 }
 
 func (ks kobjSlice) MarshalLog() interface{} {
-	objectRefs, err := ks.process()
-	if err != nil {
-		return err.Error()
+	objectRefs, errStr := ks.process()
+	if errStr != "" {
+		return errStr
 	}
 	return objectRefs
 }
 
-func (ks kobjSlice) process() ([]interface{}, error) {
+func (ks kobjSlice) process() (objs []interface{}, err string) {
 	s := reflect.ValueOf(ks.arg)
 	switch s.Kind() {
 	case reflect.Invalid:
 		// nil parameter, print as nil.
-		return nil, nil
+		return nil, ""
 	case reflect.Slice:
 		// Okay, handle below.
 	default:
-		return nil, fmt.Errorf("<KObjSlice needs a slice, got type %T>", ks.arg)
+		return nil, fmt.Sprintf("<KObjSlice needs a slice, got type %T>", ks.arg)
 	}
 	objectRefs := make([]interface{}, 0, s.Len())
 	for i := 0; i < s.Len(); i++ {
@@ -151,8 +172,41 @@ func (ks kobjSlice) process() ([]interface{}, error) {
 		} else if v, ok := item.(KMetadata); ok {
 			objectRefs = append(objectRefs, KObj(v))
 		} else {
-			return nil, fmt.Errorf("<KObjSlice needs a slice of values implementing KMetadata, got type %T>", item)
+			return nil, fmt.Sprintf("<KObjSlice needs a slice of values implementing KMetadata, got type %T>", item)
+		}
+	}
+	return objectRefs, ""
+}
+
+var nilToken = []byte("null")
+
+func (ks kobjSlice) WriteText(out *bytes.Buffer) {
+	s := reflect.ValueOf(ks.arg)
+	switch s.Kind() {
+	case reflect.Invalid:
+		// nil parameter, print as null.
+		out.Write(nilToken)
+		return
+	case reflect.Slice:
+		// Okay, handle below.
+	default:
+		fmt.Fprintf(out, `"<KObjSlice needs a slice, got type %T>"`, ks.arg)
+		return
+	}
+	out.Write([]byte{'['})
+	defer out.Write([]byte{']'})
+	for i := 0; i < s.Len(); i++ {
+		if i > 0 {
+			out.Write([]byte{','})
+		}
+		item := s.Index(i).Interface()
+		if item == nil {
+			out.Write(nilToken)
+		} else if v, ok := item.(KMetadata); ok {
+			KObj(v).WriteText(out)
+		} else {
+			fmt.Fprintf(out, `"<KObjSlice needs a slice of values implementing KMetadata, got type %T>"`, item)
+			return
 		}
 	}
-	return objectRefs, nil
 }
diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go
index 1bd11b675..152f8a6bd 100644
--- a/vendor/k8s.io/klog/v2/klog.go
+++ b/vendor/k8s.io/klog/v2/klog.go
@@ -91,8 +91,6 @@ import (
 	"sync/atomic"
 	"time"
 
-	"github.com/go-logr/logr"
-
 	"k8s.io/klog/v2/internal/buffer"
 	"k8s.io/klog/v2/internal/clock"
 	"k8s.io/klog/v2/internal/dbg"
@@ -453,7 +451,7 @@ type settings struct {
 
 	// logger is the global Logger chosen by users of klog, nil if
 	// none is available.
-	logger *Logger
+	logger *logWriter
 
 	// loggerOptions contains the options that were supplied for
 	// globalLogger.
@@ -525,6 +523,11 @@ func (s settings) deepCopy() settings {
 	}
 	s.vmodule.filter = filter
 
+	if s.logger != nil {
+		logger := *s.logger
+		s.logger = &logger
+	}
+
 	return s
 }
 
@@ -532,11 +535,6 @@ func (s settings) deepCopy() settings {
 type loggingT struct {
 	settings
 
-	// bufferCache maintains the free list. It uses its own mutex
-	// so buffers can be grabbed and printed to without holding the main lock,
-	// for better parallelization.
-	bufferCache buffer.Buffers
-
 	// flushD holds a flushDaemon that frequently flushes log file buffers.
 	// Uses its own mutex.
 	flushD *flushDaemon
@@ -664,7 +662,7 @@ func (l *loggingT) header(s severity.Severity, depth int) (*buffer.Buffer, strin
 
 // formatHeader formats a log header using the provided file name and line number.
 func (l *loggingT) formatHeader(s severity.Severity, file string, line int) *buffer.Buffer {
-	buf := l.bufferCache.GetBuffer()
+	buf := buffer.GetBuffer()
 	if l.skipHeaders {
 		return buf
 	}
@@ -673,17 +671,18 @@ func (l *loggingT) formatHeader(s severity.Severity, file string, line int) *buf
 	return buf
 }
 
-func (l *loggingT) println(s severity.Severity, logger *logr.Logger, filter LogFilter, args ...interface{}) {
+func (l *loggingT) println(s severity.Severity, logger *logWriter, filter LogFilter, args ...interface{}) {
 	l.printlnDepth(s, logger, filter, 1, args...)
} -func (l *loggingT) printlnDepth(s severity.Severity, logger *logr.Logger, filter LogFilter, depth int, args ...interface{}) { +func (l *loggingT) printlnDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, args ...interface{}) { buf, file, line := l.header(s, depth) - // if logger is set, we clear the generated header as we rely on the backing - // logger implementation to print headers - if logger != nil { - l.bufferCache.PutBuffer(buf) - buf = l.bufferCache.GetBuffer() + // If a logger is set and doesn't support writing a formatted buffer, + // we clear the generated header as we rely on the backing + // logger implementation to print headers. + if logger != nil && logger.writeKlogBuffer == nil { + buffer.PutBuffer(buf) + buf = buffer.GetBuffer() } if filter != nil { args = filter.Filter(args) @@ -692,17 +691,18 @@ func (l *loggingT) printlnDepth(s severity.Severity, logger *logr.Logger, filter l.output(s, logger, buf, depth, file, line, false) } -func (l *loggingT) print(s severity.Severity, logger *logr.Logger, filter LogFilter, args ...interface{}) { +func (l *loggingT) print(s severity.Severity, logger *logWriter, filter LogFilter, args ...interface{}) { l.printDepth(s, logger, filter, 1, args...) } -func (l *loggingT) printDepth(s severity.Severity, logger *logr.Logger, filter LogFilter, depth int, args ...interface{}) { +func (l *loggingT) printDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, args ...interface{}) { buf, file, line := l.header(s, depth) - // if logr is set, we clear the generated header as we rely on the backing - // logr implementation to print headers - if logger != nil { - l.bufferCache.PutBuffer(buf) - buf = l.bufferCache.GetBuffer() + // If a logger is set and doesn't support writing a formatted buffer, + // we clear the generated header as we rely on the backing + // logger implementation to print headers. + if logger != nil && logger.writeKlogBuffer == nil { + buffer.PutBuffer(buf) + buf = buffer.GetBuffer() } if filter != nil { args = filter.Filter(args) @@ -714,17 +714,18 @@ func (l *loggingT) printDepth(s severity.Severity, logger *logr.Logger, filter L l.output(s, logger, buf, depth, file, line, false) } -func (l *loggingT) printf(s severity.Severity, logger *logr.Logger, filter LogFilter, format string, args ...interface{}) { +func (l *loggingT) printf(s severity.Severity, logger *logWriter, filter LogFilter, format string, args ...interface{}) { l.printfDepth(s, logger, filter, 1, format, args...) } -func (l *loggingT) printfDepth(s severity.Severity, logger *logr.Logger, filter LogFilter, depth int, format string, args ...interface{}) { +func (l *loggingT) printfDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, format string, args ...interface{}) { buf, file, line := l.header(s, depth) - // if logr is set, we clear the generated header as we rely on the backing - // logr implementation to print headers - if logger != nil { - l.bufferCache.PutBuffer(buf) - buf = l.bufferCache.GetBuffer() + // If a logger is set and doesn't support writing a formatted buffer, + // we clear the generated header as we rely on the backing + // logger implementation to print headers. 
+ if logger != nil && logger.writeKlogBuffer == nil { + buffer.PutBuffer(buf) + buf = buffer.GetBuffer() } if filter != nil { format, args = filter.FilterF(format, args) @@ -739,13 +740,14 @@ func (l *loggingT) printfDepth(s severity.Severity, logger *logr.Logger, filter // printWithFileLine behaves like print but uses the provided file and line number. If // alsoLogToStderr is true, the log message always appears on standard error; it // will also appear in the log file unless --logtostderr is set. -func (l *loggingT) printWithFileLine(s severity.Severity, logger *logr.Logger, filter LogFilter, file string, line int, alsoToStderr bool, args ...interface{}) { +func (l *loggingT) printWithFileLine(s severity.Severity, logger *logWriter, filter LogFilter, file string, line int, alsoToStderr bool, args ...interface{}) { buf := l.formatHeader(s, file, line) - // if logr is set, we clear the generated header as we rely on the backing - // logr implementation to print headers - if logger != nil { - l.bufferCache.PutBuffer(buf) - buf = l.bufferCache.GetBuffer() + // If a logger is set and doesn't support writing a formatted buffer, + // we clear the generated header as we rely on the backing + // logger implementation to print headers. + if logger != nil && logger.writeKlogBuffer == nil { + buffer.PutBuffer(buf) + buf = buffer.GetBuffer() } if filter != nil { args = filter.Filter(args) @@ -758,7 +760,7 @@ func (l *loggingT) printWithFileLine(s severity.Severity, logger *logr.Logger, f } // if loggr is specified, will call loggr.Error, otherwise output with logging module. -func (l *loggingT) errorS(err error, logger *logr.Logger, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { +func (l *loggingT) errorS(err error, logger *logWriter, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { if filter != nil { msg, keysAndValues = filter.FilterS(msg, keysAndValues) } @@ -770,7 +772,7 @@ func (l *loggingT) errorS(err error, logger *logr.Logger, filter LogFilter, dept } // if loggr is specified, will call loggr.Info, otherwise output with logging module. -func (l *loggingT) infoS(logger *logr.Logger, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { +func (l *loggingT) infoS(logger *logWriter, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { if filter != nil { msg, keysAndValues = filter.FilterS(msg, keysAndValues) } @@ -785,7 +787,7 @@ func (l *loggingT) infoS(logger *logr.Logger, filter LogFilter, depth int, msg s // set log severity by s func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string, keysAndValues ...interface{}) { // Only create a new buffer if we don't have one cached. - b := l.bufferCache.GetBuffer() + b := buffer.GetBuffer() // The message is always quoted, even if it contains line breaks. // If developers want multi-line output, they should use a small, fixed // message and put the multi-line output into a value. @@ -796,7 +798,7 @@ func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string, serialize.KVListFormat(&b.Buffer, keysAndValues...) l.printDepth(s, logging.logger, nil, depth+1, &b.Buffer) // Make the buffer available for reuse. - l.bufferCache.PutBuffer(b) + buffer.PutBuffer(b) } // redirectBuffer is used to set an alternate destination for the logs @@ -851,7 +853,7 @@ func LogToStderr(stderr bool) { } // output writes the data to the log files and releases the buffer. 
-func (l *loggingT) output(s severity.Severity, log *logr.Logger, buf *buffer.Buffer, depth int, file string, line int, alsoToStderr bool) { +func (l *loggingT) output(s severity.Severity, logger *logWriter, buf *buffer.Buffer, depth int, file string, line int, alsoToStderr bool) { var isLocked = true l.mu.Lock() defer func() { @@ -867,13 +869,17 @@ func (l *loggingT) output(s severity.Severity, log *logr.Logger, buf *buffer.Buf } } data := buf.Bytes() - if log != nil { - // TODO: set 'severity' and caller information as structured log info - // keysAndValues := []interface{}{"severity", severityName[s], "file", file, "line", line} - if s == severity.ErrorLog { - logging.logger.WithCallDepth(depth+3).Error(nil, string(data)) + if logger != nil { + if logger.writeKlogBuffer != nil { + logger.writeKlogBuffer(data) } else { - log.WithCallDepth(depth + 3).Info(string(data)) + // TODO: set 'severity' and caller information as structured log info + // keysAndValues := []interface{}{"severity", severityName[s], "file", file, "line", line} + if s == severity.ErrorLog { + logger.WithCallDepth(depth+3).Error(nil, string(data)) + } else { + logger.WithCallDepth(depth + 3).Info(string(data)) + } } } else if l.toStderr { os.Stderr.Write(data) @@ -948,7 +954,7 @@ func (l *loggingT) output(s severity.Severity, log *logr.Logger, buf *buffer.Buf timeoutFlush(ExitFlushTimeout) OsExit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. } - l.bufferCache.PutBuffer(buf) + buffer.PutBuffer(buf) if stats := severityStats[s]; stats != nil { atomic.AddInt64(&stats.lines, 1) @@ -1222,6 +1228,19 @@ func CopyStandardLogTo(name string) { stdLog.SetOutput(logBridge(sev)) } +// NewStandardLogger returns a Logger that writes to the klog logs for the +// named and lower severities. +// +// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not +// recognized, NewStandardLogger panics. +func NewStandardLogger(name string) *stdLog.Logger { + sev, ok := severity.ByName(name) + if !ok { + panic(fmt.Sprintf("klog.NewStandardLogger(%q): unknown severity", name)) + } + return stdLog.New(logBridge(sev), "", stdLog.Lshortfile) +} + // logBridge provides the Write method that enables CopyStandardLogTo to connect // Go's standard logs to the logs provided by this package. type logBridge severity.Severity @@ -1282,7 +1301,7 @@ func (l *loggingT) setV(pc uintptr) Level { // See the documentation of V for more information. type Verbose struct { enabled bool - logr *logr.Logger + logger *logWriter } func newVerbose(level Level, b bool) Verbose { @@ -1290,7 +1309,7 @@ func newVerbose(level Level, b bool) Verbose { return Verbose{b, nil} } v := logging.logger.V(int(level)) - return Verbose{b, &v} + return Verbose{b, &logWriter{Logger: v, writeKlogBuffer: logging.loggerOptions.writeKlogBuffer}} } // V reports whether verbosity at the call site is at least the requested level. @@ -1313,6 +1332,13 @@ func newVerbose(level Level, b bool) Verbose { // less than or equal to the value of the -vmodule pattern matching the source file // containing the call. func V(level Level) Verbose { + return VDepth(1, level) +} + +// VDepth is a variant of V that accepts a number of stack frames that will be +// skipped when checking the -vmodule patterns. VDepth(0) is equivalent to +// V(). +func VDepth(depth int, level Level) Verbose { // This function tries hard to be cheap unless there's work to do. // The fast path is two atomic loads and compares. 
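The change to output() above is where the new writeKlogBuffer callback takes effect: when it is set, klog keeps its own formatted header and hands the complete buffer to the callback instead of calling the backing logger's Info or Error. A minimal wiring sketch, with logr.Discard standing in for a real logr sink:

package main

import (
	"os"

	"github.com/go-logr/logr"
	"k8s.io/klog/v2"
)

func main() {
	// Install a backing logger together with the WriteKlogBuffer option so
	// that legacy calls like Infof reach the callback with the klog header
	// already formatted.
	klog.SetLoggerWithOptions(logr.Discard(), klog.WriteKlogBuffer(func(data []byte) {
		os.Stderr.Write(data) // exactly what klog itself would have written
	}))
	defer klog.ClearLogger()

	klog.Infof("routed through the buffer callback: %d", 42)
	klog.Flush()
}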
@@ -1329,7 +1355,7 @@ func V(level Level) Verbose { // but if V logging is enabled we're slow anyway. logging.mu.Lock() defer logging.mu.Unlock() - if runtime.Callers(2, logging.pcs[:]) == 0 { + if runtime.Callers(2+depth, logging.pcs[:]) == 0 { return newVerbose(level, false) } // runtime.Callers returns "return PCs", but we want @@ -1357,7 +1383,7 @@ func (v Verbose) Enabled() bool { // See the documentation of V for usage. func (v Verbose) Info(args ...interface{}) { if v.enabled { - logging.print(severity.InfoLog, v.logr, logging.filter, args...) + logging.print(severity.InfoLog, v.logger, logging.filter, args...) } } @@ -1365,7 +1391,7 @@ func (v Verbose) Info(args ...interface{}) { // See the documentation of V for usage. func (v Verbose) InfoDepth(depth int, args ...interface{}) { if v.enabled { - logging.printDepth(severity.InfoLog, v.logr, logging.filter, depth, args...) + logging.printDepth(severity.InfoLog, v.logger, logging.filter, depth, args...) } } @@ -1373,7 +1399,7 @@ func (v Verbose) InfoDepth(depth int, args ...interface{}) { // See the documentation of V for usage. func (v Verbose) Infoln(args ...interface{}) { if v.enabled { - logging.println(severity.InfoLog, v.logr, logging.filter, args...) + logging.println(severity.InfoLog, v.logger, logging.filter, args...) } } @@ -1381,7 +1407,7 @@ func (v Verbose) Infoln(args ...interface{}) { // See the documentation of V for usage. func (v Verbose) InfolnDepth(depth int, args ...interface{}) { if v.enabled { - logging.printlnDepth(severity.InfoLog, v.logr, logging.filter, depth, args...) + logging.printlnDepth(severity.InfoLog, v.logger, logging.filter, depth, args...) } } @@ -1389,7 +1415,7 @@ func (v Verbose) InfolnDepth(depth int, args ...interface{}) { // See the documentation of V for usage. func (v Verbose) Infof(format string, args ...interface{}) { if v.enabled { - logging.printf(severity.InfoLog, v.logr, logging.filter, format, args...) + logging.printf(severity.InfoLog, v.logger, logging.filter, format, args...) } } @@ -1397,7 +1423,7 @@ func (v Verbose) Infof(format string, args ...interface{}) { // See the documentation of V for usage. func (v Verbose) InfofDepth(depth int, format string, args ...interface{}) { if v.enabled { - logging.printfDepth(severity.InfoLog, v.logr, logging.filter, depth, format, args...) + logging.printfDepth(severity.InfoLog, v.logger, logging.filter, depth, format, args...) } } @@ -1405,7 +1431,7 @@ func (v Verbose) InfofDepth(depth int, format string, args ...interface{}) { // See the documentation of V for usage. func (v Verbose) InfoS(msg string, keysAndValues ...interface{}) { if v.enabled { - logging.infoS(v.logr, logging.filter, 0, msg, keysAndValues...) + logging.infoS(v.logger, logging.filter, 0, msg, keysAndValues...) } } @@ -1419,14 +1445,14 @@ func InfoSDepth(depth int, msg string, keysAndValues ...interface{}) { // See the documentation of V for usage. func (v Verbose) InfoSDepth(depth int, msg string, keysAndValues ...interface{}) { if v.enabled { - logging.infoS(v.logr, logging.filter, depth, msg, keysAndValues...) + logging.infoS(v.logger, logging.filter, depth, msg, keysAndValues...) } } // Deprecated: Use ErrorS instead. func (v Verbose) Error(err error, msg string, args ...interface{}) { if v.enabled { - logging.errorS(err, v.logr, logging.filter, 0, msg, args...) + logging.errorS(err, v.logger, logging.filter, 0, msg, args...) } } @@ -1434,7 +1460,7 @@ func (v Verbose) Error(err error, msg string, args ...interface{}) { // See the documentation of V for usage. 
func (v Verbose) ErrorS(err error, msg string, keysAndValues ...interface{}) { if v.enabled { - logging.errorS(err, v.logr, logging.filter, 0, msg, keysAndValues...) + logging.errorS(err, v.logger, logging.filter, 0, msg, keysAndValues...) } } diff --git a/vendor/k8s.io/klog/v2/klogr.go b/vendor/k8s.io/klog/v2/klogr.go index 027a4014a..15de00e21 100644 --- a/vendor/k8s.io/klog/v2/klogr.go +++ b/vendor/k8s.io/klog/v2/klogr.go @@ -42,19 +42,21 @@ func (l *klogger) Init(info logr.RuntimeInfo) { l.callDepth += info.CallDepth } -func (l klogger) Info(level int, msg string, kvList ...interface{}) { +func (l *klogger) Info(level int, msg string, kvList ...interface{}) { merged := serialize.MergeKVs(l.values, kvList) if l.prefix != "" { msg = l.prefix + ": " + msg } - V(Level(level)).InfoSDepth(l.callDepth+1, msg, merged...) + // Skip this function. + VDepth(l.callDepth+1, Level(level)).InfoSDepth(l.callDepth+1, msg, merged...) } -func (l klogger) Enabled(level int) bool { - return V(Level(level)).Enabled() +func (l *klogger) Enabled(level int) bool { + // Skip this function and logr.Logger.Info where Enabled is called. + return VDepth(l.callDepth+2, Level(level)).Enabled() } -func (l klogger) Error(err error, msg string, kvList ...interface{}) { +func (l *klogger) Error(err error, msg string, kvList ...interface{}) { merged := serialize.MergeKVs(l.values, kvList) if l.prefix != "" { msg = l.prefix + ": " + msg diff --git a/vendor/modules.txt b/vendor/modules.txt index 3e7507c54..6600d84d1 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,48 +1,22 @@ -# github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d +# github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 ## explicit; go 1.13 github.com/asaskevich/govalidator -# github.com/beorn7/perks v1.0.1 -## explicit; go 1.11 -github.com/beorn7/perks/quantile # github.com/blang/semver/v4 v4.0.0 ## explicit; go 1.14 github.com/blang/semver/v4 -# github.com/cespare/xxhash/v2 v2.2.0 -## explicit; go 1.11 -github.com/cespare/xxhash/v2 -# github.com/cilium/cilium v1.14.0-snapshot.1 +# github.com/cilium/cilium v1.14.0-snapshot.3.0.20230614154348-87ac446b902c ## explicit; go 1.20 -github.com/cilium/cilium/api/v1/client -github.com/cilium/cilium/api/v1/client/daemon -github.com/cilium/cilium/api/v1/client/endpoint -github.com/cilium/cilium/api/v1/client/ipam -github.com/cilium/cilium/api/v1/client/metrics -github.com/cilium/cilium/api/v1/client/policy -github.com/cilium/cilium/api/v1/client/prefilter -github.com/cilium/cilium/api/v1/client/recorder -github.com/cilium/cilium/api/v1/client/service github.com/cilium/cilium/api/v1/flow -github.com/cilium/cilium/api/v1/health/client -github.com/cilium/cilium/api/v1/health/client/connectivity -github.com/cilium/cilium/api/v1/health/client/restapi -github.com/cilium/cilium/api/v1/health/models github.com/cilium/cilium/api/v1/models github.com/cilium/cilium/api/v1/observer github.com/cilium/cilium/api/v1/peer github.com/cilium/cilium/api/v1/recorder github.com/cilium/cilium/api/v1/relay -github.com/cilium/cilium/pkg/api github.com/cilium/cilium/pkg/cidr -github.com/cilium/cilium/pkg/client github.com/cilium/cilium/pkg/clustermesh/types github.com/cilium/cilium/pkg/command -github.com/cilium/cilium/pkg/common -github.com/cilium/cilium/pkg/components github.com/cilium/cilium/pkg/container github.com/cilium/cilium/pkg/defaults -github.com/cilium/cilium/pkg/endpoint/id -github.com/cilium/cilium/pkg/health/client -github.com/cilium/cilium/pkg/health/defaults 
github.com/cilium/cilium/pkg/hubble/api/v1 github.com/cilium/cilium/pkg/hubble/filters github.com/cilium/cilium/pkg/hubble/k8s @@ -59,13 +33,10 @@ github.com/cilium/cilium/pkg/lock github.com/cilium/cilium/pkg/logging github.com/cilium/cilium/pkg/logging/logfields github.com/cilium/cilium/pkg/mac -github.com/cilium/cilium/pkg/metrics github.com/cilium/cilium/pkg/monitor/api github.com/cilium/cilium/pkg/monitor/notifications github.com/cilium/cilium/pkg/option -github.com/cilium/cilium/pkg/safeio -github.com/cilium/cilium/pkg/safetime -github.com/cilium/cilium/pkg/spanstat +github.com/cilium/cilium/pkg/slices github.com/cilium/cilium/pkg/version github.com/cilium/cilium/pkg/versioncheck # github.com/davecgh/go-spew v1.1.1 @@ -77,13 +48,9 @@ github.com/fatih/color # github.com/fsnotify/fsnotify v1.6.0 ## explicit; go 1.16 github.com/fsnotify/fsnotify -# github.com/go-logr/logr v1.2.3 +# github.com/go-logr/logr v1.2.4 ## explicit; go 1.16 github.com/go-logr/logr -github.com/go-logr/logr/funcr -# github.com/go-logr/stdr v1.2.2 -## explicit; go 1.16 -github.com/go-logr/stdr # github.com/go-ole/go-ole v1.2.6 ## explicit; go 1.12 github.com/go-ole/go-ole @@ -110,22 +77,11 @@ github.com/go-openapi/jsonreference/internal # github.com/go-openapi/loads v0.21.2 ## explicit; go 1.13 github.com/go-openapi/loads -# github.com/go-openapi/runtime v0.25.0 -## explicit; go 1.18 -github.com/go-openapi/runtime -github.com/go-openapi/runtime/client -github.com/go-openapi/runtime/logger -github.com/go-openapi/runtime/middleware -github.com/go-openapi/runtime/middleware/denco -github.com/go-openapi/runtime/middleware/header -github.com/go-openapi/runtime/middleware/untyped -github.com/go-openapi/runtime/security -github.com/go-openapi/runtime/yamlpc -# github.com/go-openapi/spec v0.20.8 +# github.com/go-openapi/spec v0.20.9 ## explicit; go 1.13 github.com/go-openapi/spec -# github.com/go-openapi/strfmt v0.21.5 -## explicit; go 1.13 +# github.com/go-openapi/strfmt v0.21.7 +## explicit; go 1.19 github.com/go-openapi/strfmt # github.com/go-openapi/swag v0.22.3 ## explicit; go 1.18 @@ -195,9 +151,6 @@ github.com/mattn/go-colorable # github.com/mattn/go-isatty v0.0.17 ## explicit; go 1.15 github.com/mattn/go-isatty -# github.com/matttproud/golang_protobuf_extensions v1.0.2 -## explicit; go 1.9 -github.com/matttproud/golang_protobuf_extensions/pbutil # github.com/mitchellh/mapstructure v1.5.0 ## explicit; go 1.14 github.com/mitchellh/mapstructure @@ -210,11 +163,6 @@ github.com/modern-go/reflect2 # github.com/oklog/ulid v1.3.1 ## explicit github.com/oklog/ulid -# github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b -## explicit; go 1.14 -github.com/opentracing/opentracing-go -github.com/opentracing/opentracing-go/ext -github.com/opentracing/opentracing-go/log # github.com/pelletier/go-toml/v2 v2.0.8 ## explicit; go 1.16 github.com/pelletier/go-toml/v2 @@ -231,29 +179,10 @@ github.com/pmezard/go-difflib/difflib # github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c ## explicit; go 1.14 github.com/power-devops/perfstat -# github.com/prometheus/client_golang v1.14.0 -## explicit; go 1.17 -github.com/prometheus/client_golang/prometheus -github.com/prometheus/client_golang/prometheus/collectors -github.com/prometheus/client_golang/prometheus/internal -github.com/prometheus/client_golang/prometheus/promhttp -# github.com/prometheus/client_model v0.3.0 -## explicit; go 1.9 -github.com/prometheus/client_model/go -# github.com/prometheus/common v0.37.0 -## explicit; go 1.16 
-github.com/prometheus/common/expfmt -github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg -github.com/prometheus/common/model -# github.com/prometheus/procfs v0.9.0 -## explicit; go 1.18 -github.com/prometheus/procfs -github.com/prometheus/procfs/internal/fs -github.com/prometheus/procfs/internal/util # github.com/sasha-s/go-deadlock v0.3.1 ## explicit github.com/sasha-s/go-deadlock -# github.com/shirou/gopsutil/v3 v3.23.2 +# github.com/shirou/gopsutil/v3 v3.23.5 ## explicit; go 1.15 github.com/shirou/gopsutil/v3/internal/common github.com/shirou/gopsutil/v3/mem @@ -302,18 +231,18 @@ github.com/tklauser/go-sysconf # github.com/tklauser/numcpus v0.6.0 ## explicit; go 1.13 github.com/tklauser/numcpus -# github.com/vishvananda/netlink v1.2.1-beta.2.0.20220608195807-1a118fe229fc +# github.com/vishvananda/netlink v1.2.1-beta.2.0.20230420174744-55c8b9515a01 ## explicit; go 1.12 github.com/vishvananda/netlink github.com/vishvananda/netlink/nl # github.com/vishvananda/netns v0.0.4 ## explicit; go 1.17 github.com/vishvananda/netns -# github.com/yusufpapurcu/wmi v1.2.2 +# github.com/yusufpapurcu/wmi v1.2.3 ## explicit; go 1.16 github.com/yusufpapurcu/wmi -# go.mongodb.org/mongo-driver v1.10.0 -## explicit; go 1.10 +# go.mongodb.org/mongo-driver v1.11.3 +## explicit; go 1.13 go.mongodb.org/mongo-driver/bson go.mongodb.org/mongo-driver/bson/bsoncodec go.mongodb.org/mongo-driver/bson/bsonoptions @@ -321,24 +250,8 @@ go.mongodb.org/mongo-driver/bson/bsonrw go.mongodb.org/mongo-driver/bson/bsontype go.mongodb.org/mongo-driver/bson/primitive go.mongodb.org/mongo-driver/x/bsonx/bsoncore -# go.opentelemetry.io/otel v1.14.0 -## explicit; go 1.18 -go.opentelemetry.io/otel -go.opentelemetry.io/otel/attribute -go.opentelemetry.io/otel/baggage -go.opentelemetry.io/otel/codes -go.opentelemetry.io/otel/internal -go.opentelemetry.io/otel/internal/attribute -go.opentelemetry.io/otel/internal/baggage -go.opentelemetry.io/otel/internal/global -go.opentelemetry.io/otel/propagation -go.opentelemetry.io/otel/semconv/internal -go.opentelemetry.io/otel/semconv/v1.12.0 -# go.opentelemetry.io/otel/trace v1.14.0 -## explicit; go 1.18 -go.opentelemetry.io/otel/trace -# golang.org/x/exp v0.0.0-20230321023759-10a507213a29 -## explicit; go 1.18 +# golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 +## explicit; go 1.20 golang.org/x/exp/constraints golang.org/x/exp/slices # golang.org/x/net v0.10.0 @@ -349,7 +262,7 @@ golang.org/x/net/http2/hpack golang.org/x/net/idna golang.org/x/net/internal/timeseries golang.org/x/net/trace -# golang.org/x/sync v0.1.0 +# golang.org/x/sync v0.2.0 ## explicit golang.org/x/sync/semaphore # golang.org/x/sys v0.9.0 @@ -367,7 +280,7 @@ golang.org/x/text/unicode/norm # golang.org/x/time v0.3.0 ## explicit golang.org/x/time/rate -# google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc ## explicit; go 1.19 google.golang.org/genproto/googleapis/rpc/status # google.golang.org/grpc v1.56.0 @@ -455,6 +368,7 @@ google.golang.org/protobuf/runtime/protoimpl google.golang.org/protobuf/types/descriptorpb google.golang.org/protobuf/types/known/anypb google.golang.org/protobuf/types/known/durationpb +google.golang.org/protobuf/types/known/fieldmaskpb google.golang.org/protobuf/types/known/timestamppb google.golang.org/protobuf/types/known/wrapperspb # gopkg.in/inf.v0 v0.9.1 @@ -469,7 +383,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/apimachinery v0.26.0 +# 
k8s.io/apimachinery v0.26.3 ## explicit; go 1.19 k8s.io/apimachinery/pkg/api/resource k8s.io/apimachinery/pkg/apis/meta/v1 @@ -492,11 +406,11 @@ k8s.io/apimachinery/pkg/util/validation k8s.io/apimachinery/pkg/util/validation/field k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/client-go v0.26.0 +# k8s.io/client-go v0.26.3 ## explicit; go 1.19 k8s.io/client-go/third_party/forked/golang/template k8s.io/client-go/util/jsonpath -# k8s.io/klog/v2 v2.80.1 +# k8s.io/klog/v2 v2.100.1 ## explicit; go 1.13 k8s.io/klog/v2 k8s.io/klog/v2/internal/buffer